Example #1
File: view.cpp Project: hainm/libdynd
nd::array nd::view(const nd::array &arr, const ndt::type &tp)
{
  if (arr.get_type() == tp) {
    // If the types match exactly, simply return 'arr'
    return arr;
  } else if (tp.get_type_id() == bytes_type_id) {
    // If it's a request to view the data as raw bytes
    nd::array result = view_as_bytes(arr, tp);
    if (!result.is_null()) {
      return result;
    }
  } else if (arr.get_type().get_type_id() == bytes_type_id) {
    // If it's a request to view raw bytes as something else
    nd::array result = view_from_bytes(arr, tp);
    if (!result.is_null()) {
      return result;
    }
  } else if (arr.get_ndim() == tp.get_ndim()) {
    // If the type is symbolic, e.g. has a "Fixed" symbolic dimension,
    // first substitute in the shape from the array
    if (tp.is_symbolic()) {
      dimvector shape(arr.get_ndim());
      arr.get_shape(shape.get());
      return view_concrete(arr, substitute_shape(tp, arr.get_ndim(), shape.get()));
    } else {
      return view_concrete(arr, tp);
    }
  }

  stringstream ss;
  ss << "Unable to view nd::array of type " << arr.get_type();
  ss << " as type " << tp;
  throw type_error(ss.str());
}
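The sketch below shows how the function above might be called. It is a hypothetical usage example, not taken from the libdynd sources: the headers and the datashape-string ndt::type constructor are assumptions.

#include <dynd/array.hpp>
#include <dynd/view.hpp>

using namespace dynd;

void view_usage_sketch()
{
  // Assumption: nd::empty and the datashape-string ndt::type constructor
  // behave as in the other snippets on this page.
  nd::array a = nd::empty(ndt::type("4 * int32"));
  // Exact type match: 'same' is simply 'a' again.
  nd::array same = nd::view(a, ndt::type("4 * int32"));
  // Symbolic "Fixed" dimension: the shape is substituted from 'a'
  // before view_concrete is attempted.
  nd::array fixed_view = nd::view(a, ndt::type("Fixed * int32"));
}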
Example #2
nd::array dynd::format_json(const nd::array &n, bool struct_as_list)
{
  // Create a UTF-8 string
  nd::array result = nd::empty(ndt::string_type::make());

  // Initialize the output with some memory
  output_data out;
  out.out_string.resize(1024);
  out.out_begin = out.out_string.begin();
  out.out_capacity_end = out.out_string.end();
  out.out_end = out.out_begin;
  out.struct_as_list = struct_as_list;

  if (!n.get_type().is_expression()) {
    ::format_json(out, n.get_type(), n.get_arrmeta(), n.get_readonly_originptr());
  } else {
    nd::array tmp = n.eval();
    ::format_json(out, tmp.get_type(), tmp.get_arrmeta(), tmp.get_readonly_originptr());
  }

  // Shrink the memory to fit, and set the pointers in the output
  string *d = reinterpret_cast<string *>(result.get_readwrite_originptr());
  d->assign(out.out_string.data(), out.out_end - out.out_begin);

  // Finalize processing and mark the result as immutable
  result.get_type().extended()->arrmeta_finalize_buffers(result.get_arrmeta());
  result.flag_as_immutable();

  return result;
}
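A minimal usage sketch for format_json follows. It is hypothetical: the header name dynd/json_formatter.hpp and the scalar broadcast assignment through vals() are assumptions, not shown in the snippet above.

#include <iostream>
#include <dynd/array.hpp>
#include <dynd/json_formatter.hpp>

using namespace dynd;

void format_json_usage_sketch()
{
  // Assumption: build a small int32 array and fill it by broadcast assignment.
  nd::array a = nd::empty(ndt::type("3 * int32"));
  a.vals() = 7;
  // Serialize to JSON; the result is an immutable UTF-8 string array.
  nd::array json = format_json(a, /* struct_as_list */ false);
  std::cout << json << std::endl;
}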
Example #3
File: view.cpp Project: talumbau/libdynd
nd::array nd::view(const nd::array& arr, const ndt::type& tp)
{
    // If the types match exactly, simply return 'arr'
    if (arr.get_type() == tp) {
        return arr;
    } else if (arr.get_ndim() == tp.get_ndim()) {
        // Allocate a result array to attempt the view in it
        array result(make_array_memory_block(tp.get_metadata_size()));
        // Copy the fields
        result.get_ndo()->m_data_pointer = arr.get_ndo()->m_data_pointer;
        if (arr.get_ndo()->m_data_reference == NULL) {
            // Embedded data, need reference to the array
            result.get_ndo()->m_data_reference = arr.get_memblock().release();
        } else {
            // Use the same data reference, avoid producing a chain
            result.get_ndo()->m_data_reference = arr.get_data_memblock().release();
        }
        result.get_ndo()->m_type = ndt::type(tp).release();
        result.get_ndo()->m_flags = arr.get_ndo()->m_flags;
        // Now try to copy the metadata as a view
        if (try_view(arr.get_type(), arr.get_ndo_meta(), tp,
                     result.get_ndo_meta(), arr.get_memblock().get())) {
            // If it succeeded, return it
            return result;
        }
        // Otherwise fall through, let it get destructed, and raise an error
    }

    stringstream ss;
    ss << "Unable to view nd::array of type " << arr.get_type();
    ss << "as type " << tp;
    throw type_error(ss.str());
}
Example #4
nd::array dynd::format_json(const nd::array& n)
{
    // Create a UTF-8 string
    nd::array result = nd::empty(ndt::make_string());

    // Initialize the output with some memory
    output_data out;
    out.blockref = reinterpret_cast<const string_type_metadata *>(result.get_ndo_meta())->blockref;
    out.api = get_memory_block_pod_allocator_api(out.blockref);
    out.api->allocate(out.blockref, 1024, 1, &out.out_begin, &out.out_capacity_end);
    out.out_end = out.out_begin;

    if (!n.get_type().is_expression()) {
        ::format_json(out, n.get_type(), n.get_ndo_meta(), n.get_readonly_originptr());
    } else {
        nd::array tmp = n.eval();
        ::format_json(out, tmp.get_type(), tmp.get_ndo_meta(), tmp.get_readonly_originptr());
    }

    // Shrink the memory to fit, and set the pointers in the output
    string_type_data *d = reinterpret_cast<string_type_data *>(result.get_readwrite_originptr());
    d->begin = out.out_begin;
    d->end = out.out_capacity_end;
    out.api->resize(out.blockref, out.out_end - out.out_begin, &d->begin, &d->end);

    // Finalize processing and mark the result as immutable
    result.get_type().extended()->metadata_finalize_buffers(result.get_ndo_meta());
    result.flag_as_immutable();

    return result;
}
Example #5
inline string broadcast_error_message(const nd::array& dst, const nd::array& src)
{
    vector<intptr_t> dst_shape = dst.get_shape(), src_shape = src.get_shape();
    stringstream ss;

    ss << "cannot broadcast dynd array with type ";
    ss << src.get_type() << " and shape ";
    print_shape(ss, src_shape);
    ss << " to type " << dst.get_type() << " and shape ";
    print_shape(ss, dst_shape);

    return ss.str();
}
Example #6
    // Constructor which creates the output based on the input's broadcast shape
    array_iter(const ndt::type& op0_dtype, nd::array& out_op0, const nd::array& op1, const nd::array& op2, const nd::array& op3) {
        create_broadcast_result(op0_dtype, op1, op2, op3, out_op0, m_iter_ndim[0], m_itershape);
        nd::array ops[4] = {out_op0, op1, op2, op3};
        m_array_tp[0] = out_op0.get_type();
        m_array_tp[1] = op1.get_type();
        m_array_tp[2] = op2.get_type();
        m_array_tp[3] = op3.get_type();
        m_itersize = 1;
        m_iter_ndim[1] = m_array_tp[1].get_ndim();
        m_iter_ndim[2] = m_array_tp[2].get_ndim();
        m_iter_ndim[3] = m_array_tp[3].get_ndim();
        // Allocate and initialize the iterdata
        if (m_iter_ndim[0] != 0) {
            m_iterindex.init(m_iter_ndim[0]);
            memset(m_iterindex.get(), 0, sizeof(intptr_t) * m_iter_ndim[0]);
            // The destination iterdata
            size_t iterdata_size = m_array_tp[0].get_iterdata_size(m_iter_ndim[0]);
            m_iterdata[0] = reinterpret_cast<iterdata_common *>(malloc(iterdata_size));
            if (!m_iterdata[0]) {
                throw std::bad_alloc();
            }
            m_metadata[0] = out_op0.get_ndo_meta();
            m_array_tp[0].iterdata_construct(m_iterdata[0],
                            &m_metadata[0], m_iter_ndim[0], m_itershape.get(), m_uniform_tp[0]);
            m_data[0] = m_iterdata[0]->reset(m_iterdata[0], out_op0.get_readwrite_originptr(), m_iter_ndim[0]);
            // The op iterdata
            for (int i = 1; i < 4; ++i) {
                iterdata_size = m_array_tp[i].get_broadcasted_iterdata_size(m_iter_ndim[i]);
                m_iterdata[i] = reinterpret_cast<iterdata_common *>(malloc(iterdata_size));
                if (!m_iterdata[i]) {
                    throw std::bad_alloc();
                }
                m_metadata[i] = ops[i].get_ndo_meta();
                m_array_tp[i].broadcasted_iterdata_construct(m_iterdata[i],
                                &m_metadata[i], m_iter_ndim[i],
                                m_itershape.get() + (m_iter_ndim[0] - m_iter_ndim[i]), m_uniform_tp[i]);
                m_data[i] = m_iterdata[i]->reset(m_iterdata[i], ops[i].get_ndo()->m_data_pointer, m_iter_ndim[0]);
            }

            for (size_t i = 0, i_end = m_iter_ndim[0]; i != i_end; ++i) {
                m_itersize *= m_itershape[i];
            }
        } else {
            for (size_t i = 0; i < 4; ++i) {
                m_iterdata[i] = NULL;
                m_uniform_tp[i] = m_array_tp[i];
                m_data[i] = ops[i].get_ndo()->m_data_pointer;
                m_metadata[i] = ops[i].get_ndo_meta();
            }
        }
    }
Example #7
File: view.cpp Project: hainm/libdynd
static nd::array view_concrete(const nd::array &arr, const ndt::type &tp)
{
  // Allocate a result array to attempt the view in it
  nd::array result(make_array_memory_block(tp.get_arrmeta_size()));
  // Copy the fields
  result.get_ndo()->data.ptr = arr.get_ndo()->data.ptr;
  if (arr.get_ndo()->data.ref == NULL) {
    // Embedded data, need reference to the array
    result.get_ndo()->data.ref = arr.get_memblock().release();
  } else {
    // Use the same data reference, avoid producing a chain
    result.get_ndo()->data.ref = arr.get_data_memblock().release();
  }
  result.get_ndo()->m_type = ndt::type(tp).release();
  result.get_ndo()->m_flags = arr.get_ndo()->m_flags;
  // First handle a special case of viewing outermost "var" as "fixed[#]"
  if (arr.get_type().get_type_id() == var_dim_type_id && tp.get_type_id() == fixed_dim_type_id) {
    const var_dim_type_arrmeta *in_am = reinterpret_cast<const var_dim_type_arrmeta *>(arr.get_arrmeta());
    const var_dim_type_data *in_dat = reinterpret_cast<const var_dim_type_data *>(arr.get_readonly_originptr());
    fixed_dim_type_arrmeta *out_am = reinterpret_cast<fixed_dim_type_arrmeta *>(result.get_arrmeta());
    out_am->dim_size = tp.extended<ndt::fixed_dim_type>()->get_fixed_dim_size();
    out_am->stride = in_am->stride;
    if ((intptr_t)in_dat->size == out_am->dim_size) {
      // Use the more specific data reference from the var arrmeta if possible
      if (in_am->blockref != NULL) {
        memory_block_decref(result.get_ndo()->data.ref);
        memory_block_incref(in_am->blockref);
        result.get_ndo()->data.ref = in_am->blockref;
      }
      result.get_ndo()->data.ptr = in_dat->begin + in_am->offset;
      // Try to copy the rest of the arrmeta as a view
      if (try_view(arr.get_type().extended<ndt::base_dim_type>()->get_element_type(),
                   arr.get_arrmeta() + sizeof(var_dim_type_arrmeta),
                   tp.extended<ndt::base_dim_type>()->get_element_type(),
                   result.get_arrmeta() + sizeof(fixed_dim_type_arrmeta), arr.get_memblock().get())) {
        return result;
      }
    }
  }
  // Otherwise try to copy the arrmeta as a view
  else if (try_view(arr.get_type(), arr.get_arrmeta(), tp, result.get_arrmeta(), arr.get_memblock().get())) {
    // If it succeeded, return it
    return result;
  }

  stringstream ss;
  ss << "Unable to view nd::array of type " << arr.get_type();
  ss << " as type " << tp;
  throw type_error(ss.str());
}
Example #8
 const arrfunc_type *get_is_avail_arrfunc_type() const
 {
   return m_nafunc.get_type()
       .extended<base_tuple_type>()
       ->get_field_type(0)
       .extended<arrfunc_type>();
 }
Example #9
void dynd::parse_json(nd::array &out, const char *json_begin,
                      const char *json_end, const eval::eval_context *ectx)
{
    try {
        const char *begin = json_begin, *end = json_end;
        ndt::type tp = out.get_type();
        ::parse_json(tp, out.get_ndo_meta(), out.get_readwrite_originptr(), begin, end, ectx);
        begin = skip_whitespace(begin, end);
        if (begin != end) {
            throw json_parse_error(begin, "unexpected trailing JSON text", tp);
        }
    } catch (const json_parse_error& e) {
        stringstream ss;
        string line_prev, line_cur;
        int line, column;
        get_error_line_column(json_begin, json_end, e.get_position(),
                        line_prev, line_cur, line, column);
        ss << "Error parsing JSON at line " << line << ", column " << column << "\n";
        if (e.get_type().get_type_id() != uninitialized_type_id) {
            ss << "DType: " << e.get_type() << "\n";
        }
        ss << "Message: " << e.get_message() << "\n";
        print_json_parse_error_marker(ss, line_prev, line_cur, line, column);
        throw runtime_error(ss.str());
    }
}
Example #10
 inline static bool run(nd::array &a)
 {
   const ndt::type &tp = a.get_type();
   if (a.is_immutable() && tp.get_type_id() == fixed_dim_type_id) {
     // It's immutable and "N * <something>"
     const ndt::type &et = tp.extended<fixed_dim_type>()->get_element_type();
     const fixed_dim_type_arrmeta *md =
         reinterpret_cast<const fixed_dim_type_arrmeta *>(a.get_arrmeta());
     if (et.get_type_id() == type_type_id &&
         md->stride == sizeof(ndt::type)) {
       // It also has the right type and is contiguous,
       // so no modification necessary.
       return true;
     }
   }
   // We have to make a copy, check that it's a 1D array, and that
   // it has the same array kind as the requested type.
   if (tp.get_ndim() == 1) {
     // It's a 1D array
     const ndt::type &et = tp.get_type_at_dimension(NULL, 1).value_type();
     if (et.get_type_id() == type_type_id) {
       // It also has the same array type as requested
       nd::array tmp = nd::empty(a.get_dim_size(), ndt::make_type());
       tmp.vals() = a;
       tmp.flag_as_immutable();
       a.swap(tmp);
       return true;
     }
   }
   // It's not compatible, so return false
   return false;
 }
Example #11
uint32_t ndt::categorical_type::get_value_from_category(const nd::array &category) const
{
  nd::array c;
  if (category.get_type() == m_category_tp) {
    // If the type is right, get the category value directly
    c = category;
  }
  else {
    // Otherwise convert to the correct type, then get the category value
    c = nd::empty(m_category_tp);
    c.assign(category);
  }

  intptr_t i = nd::binary_search(m_categories, c).as<intptr_t>();
  if (i < 0) {
    stringstream ss;
    ss << "Unrecognized category value ";
    m_category_tp.print_data(ss, c.get()->metadata(), c.data());
    ss << " assigning to dynd type " << type(this, true);
    throw std::runtime_error(ss.str());
  }
  else {
    return (uint32_t)unchecked_fixed_dim_get<intptr_t>(m_category_index_to_value, i);
  }
}
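A hedged lookup sketch for the method above. The extended<ndt::categorical_type>() accessor follows the pattern used elsewhere on this page, but the header path and the string-literal nd::array constructor are assumptions.

// Assumption: the categorical type is declared in this header.
#include <dynd/types/categorical_type.hpp>

using namespace dynd;

// Assumption: 'cat_tp' holds a categorical type whose categories are strings.
inline uint32_t lookup_category_value(const ndt::type &cat_tp)
{
  const ndt::categorical_type *ct = cat_tp.extended<ndt::categorical_type>();
  // Assumption: nd::array can be constructed directly from a string literal.
  return ct->get_value_from_category(nd::array("banana"));
}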
Example #12
static nd::array array_function_dereference(const nd::array &self)
{
  // Follow the pointers to eliminate them
  ndt::type dt = self.get_type();
  const char *arrmeta = self.get_arrmeta();
  char *data = self.get_ndo()->m_data_pointer;
  memory_block_data *dataref = self.get_ndo()->m_data_reference;
  if (dataref == NULL) {
    dataref = self.get_memblock().get();
  }
  uint64_t flags = self.get_ndo()->m_flags;

  while (dt.get_type_id() == pointer_type_id) {
    const pointer_type_arrmeta *md =
        reinterpret_cast<const pointer_type_arrmeta *>(arrmeta);
    dt = dt.extended<ndt::pointer_type>()->get_target_type();
    arrmeta += sizeof(pointer_type_arrmeta);
    data = *reinterpret_cast<char **>(data) + md->offset;
    dataref = md->blockref;
  }

  // Create an array without the pointers
  nd::array result(make_array_memory_block(dt.get_arrmeta_size()));
  if (!dt.is_builtin()) {
    dt.extended()->arrmeta_copy_construct(result.get_arrmeta(), arrmeta,
                                          &self.get_ndo()->m_memblockdata);
  }
  result.get_ndo()->m_type = dt.release();
  result.get_ndo()->m_data_pointer = data;
  result.get_ndo()->m_data_reference = dataref;
  memory_block_incref(result.get_ndo()->m_data_reference);
  result.get_ndo()->m_flags = flags;
  return result;
}
Example #13
 const arrfunc_type *get_assign_na_arrfunc_type() const
 {
   return m_nafunc.get_type()
       .extended<base_tuple_type>()
       ->get_field_type(1)
       .extended<arrfunc_type>();
 }
Example #14
void dynd::typed_data_assign(const ndt::type &dst_tp, const char *dst_arrmeta,
                             char *dst_data, const nd::array &src_arr,
                             const eval::eval_context *ectx)
{
  typed_data_assign(dst_tp, dst_arrmeta, dst_data, src_arr.get_type(),
                    src_arr.get_arrmeta(), src_arr.get_readonly_originptr(),
                    ectx);
}
Example #15
static nd::array property_ndo_get_groups(const nd::array& n) {
    ndt::type d = n.get_type();
    while (d.get_type_id() != groupby_type_id) {
        d = d.at_single(0);
    }
    const groupby_type *gd = d.extended<groupby_type>();
    return gd->get_groups_type().p("categories");
}
Example #16
 static void set(const ndt::type& paramtype, char *metadata, char *data, const nd::array& value) {
     if (paramtype.get_type_id() == void_pointer_type_id) {
         // TODO: switch to a better mechanism for passing nd::array references
         *reinterpret_cast<const array_preamble **>(data) = value.get_ndo();
     } else {
         typed_data_assign(paramtype, metadata, data, value.get_type(), value.get_ndo_meta(), value.get_ndo()->m_data_pointer);
     }
 }
Example #17
File: view.cpp Project: hainm/libdynd
static nd::array view_as_bytes(const nd::array &arr, const ndt::type &tp)
{
  if (arr.get_type().get_flags() & type_flag_destructor) {
    // Can't view arrays of object type
    return nd::array();
  }

  // Get the essential components of the array to analyze
  memory_block_ptr data_ref = arr.get_data_memblock();
  char *data_ptr = arr.get_ndo()->data.ptr;
  ndt::type data_tp = arr.get_type();
  const char *data_meta = arr.get_arrmeta();
  intptr_t data_dim_size = -1, data_stride = 0;
  // Repeatedly refine the data
  while (data_tp.get_type_id() != uninitialized_type_id) {
    refine_bytes_view(data_ref, data_ptr, data_tp, data_meta, data_dim_size, data_stride);
  }
  // Check that it worked, and that the resulting data pointer is aligned
  if (data_dim_size < 0 ||
      !offset_is_aligned(reinterpret_cast<size_t>(data_ptr), tp.extended<ndt::bytes_type>()->get_target_alignment())) {
    // This signals we could not view the data as a
    // contiguous chunk of bytes
    return nd::array();
  }

  char *result_data_ptr = NULL;
  nd::array result(make_array_memory_block(tp.extended()->get_arrmeta_size(), tp.get_data_size(),
                                           tp.get_data_alignment(), &result_data_ptr));
  // Set the bytes extents
  ((char **)result_data_ptr)[0] = data_ptr;
  ((char **)result_data_ptr)[1] = data_ptr + data_dim_size;
  // Set the array arrmeta
  array_preamble *ndo = result.get_ndo();
  ndo->m_type = ndt::type(tp).release();
  ndo->data.ptr = result_data_ptr;
  ndo->data.ref = NULL;
  ndo->m_flags = arr.get_flags();
  // Set the bytes arrmeta
  bytes_type_arrmeta *ndo_meta = reinterpret_cast<bytes_type_arrmeta *>(result.get_arrmeta());
  ndo_meta->blockref = data_ref.release();
  return result;
}
Example #18
uint32_t categorical_type::get_value_from_category(const nd::array& category) const
{
    if (category.get_type() == m_category_tp) {
        // If the type is right, get the category value directly
        return get_value_from_category(category.get_arrmeta(), category.get_readonly_originptr());
    } else {
        // Otherwise convert to the correct type, then get the category value
        nd::array c = nd::empty(m_category_tp);
        c.val_assign(category);
        return get_value_from_category(c.get_arrmeta(), c.get_readonly_originptr());
    }
}
Example #19
    array_iter(const nd::array& op0, const nd::array& op1) {
        nd::array ops[2] = {op0, op1};
        m_array_tp[0] = op0.get_type();
        m_array_tp[1] = op1.get_type();
        m_itersize = 1;
        shortvector<int> axis_perm; // TODO: Use this to affect the iteration order
        broadcast_input_shapes(2, ops, m_iter_ndim, m_itershape, axis_perm);
        // Allocate and initialize the iterdata
        if (m_iter_ndim != 0) {
            m_iterindex.init(m_iter_ndim);
            memset(m_iterindex.get(), 0, sizeof(intptr_t) * m_iter_ndim);
            // The op iterdata
            for (int i = 0; i < 2; ++i) {
                size_t iter_ndim_i = m_array_tp[i].get_ndim();
                size_t iterdata_size = m_array_tp[i].get_broadcasted_iterdata_size(iter_ndim_i);
                m_iterdata[i] = reinterpret_cast<iterdata_common *>(malloc(iterdata_size));
                if (!m_iterdata[i]) {
                    throw std::bad_alloc();
                }
                m_metadata[i] = ops[i].get_ndo_meta();
                m_array_tp[i].broadcasted_iterdata_construct(m_iterdata[i],
                                &m_metadata[i], iter_ndim_i,
                                m_itershape.get() + (m_iter_ndim - iter_ndim_i), m_uniform_tp[i]);
                m_data[i] = m_iterdata[i]->reset(m_iterdata[i], ops[i].get_ndo()->m_data_pointer, m_iter_ndim);
            }

            for (size_t i = 0, i_end = m_iter_ndim; i != i_end; ++i) {
                m_itersize *= m_itershape[i];
            }
        } else {
            for (size_t i = 0; i < 2; ++i) {
                m_iterdata[i] = NULL;
                m_uniform_tp[i] = m_array_tp[i];
                m_data[i] = ops[i].get_ndo()->m_data_pointer;
                m_metadata[i] = ops[i].get_ndo_meta();
            }
        }
    }
Example #20
nd::callable::callable(const nd::array &rhs)
{
  if (!rhs.is_null()) {
    if (rhs.get_type().get_type_id() == callable_type_id) {
      const callable_type_data *af =
          reinterpret_cast<const callable_type_data *>(
              rhs.cdata());
      if (af->instantiate != NULL) {
        // It's valid: callable type, contains instantiate function.
        m_value = rhs;
      } else {
        throw invalid_argument("Require a non-empty callable, "
                               "provided callable has NULL "
                               "instantiate function");
      }
    } else {
      stringstream ss;
      ss << "Cannot implicitly convert nd::array of type "
         << rhs.get_type().value_type() << " to callable";
      throw type_error(ss.str());
    }
  }
}
Example #21
static void json_as_buffer(const nd::array &json, nd::array &out_tmp_ref,
                           const char *&begin, const char *&end)
{
  // Check the type of 'json', and get pointers to the begin/end of a UTF-8
  // buffer
  ndt::type json_type = json.get_type().value_type();
  switch (json_type.get_kind()) {
  case string_kind: {
    const ndt::base_string_type *sdt =
        json_type.extended<ndt::base_string_type>();
    switch (sdt->get_encoding()) {
    case string_encoding_ascii:
    case string_encoding_utf_8:
      out_tmp_ref = json.eval();
      // The data is already UTF-8, so use the buffer directly
      sdt->get_string_range(&begin, &end, out_tmp_ref.get_arrmeta(),
                            out_tmp_ref.get_readonly_originptr());
      break;
    default: {
      // The data needs to be converted to UTF-8 before parsing
      ndt::type utf8_tp = ndt::string_type::make(string_encoding_utf_8);
      out_tmp_ref = json.ucast(utf8_tp).eval();
      sdt = static_cast<const ndt::base_string_type *>(utf8_tp.extended());
      sdt->get_string_range(&begin, &end, out_tmp_ref.get_arrmeta(),
                            out_tmp_ref.get_readonly_originptr());
      break;
    }
    }
    break;
  }
  case bytes_kind: {
    out_tmp_ref = json.eval();
    const ndt::base_bytes_type *bdt =
        json_type.extended<ndt::base_bytes_type>();
    bdt->get_bytes_range(&begin, &end, out_tmp_ref.get_arrmeta(),
                         out_tmp_ref.get_readonly_originptr());
    break;
  }
  default: {
    stringstream ss;
    ss << "Input for JSON parsing must be either bytes (interpreted as UTF-8) "
          "or a string, not \"" << json_type << "\"";
    throw runtime_error(ss.str());
    break;
  }
  }
}
Example #22
/**
 * Given a buffer array of type "strided * T" which was
 * created by nd::empty, resets it so it can be used
 * as a buffer again.
 *
 * NOTE: If the array was not default-initialized by nd::empty with a
 *       "strided * T" type, undefined behavior will result.
 */
inline void reset_strided_buffer_array(const nd::array& buf)
{
  const ndt::type &buf_tp = buf.get_type();
  base_type_members::flags_type flags = buf_tp.extended()->get_flags();
  if (flags &
      (type_flag_blockref | type_flag_zeroinit | type_flag_destructor)) {
    char *buf_arrmeta = buf.get_ndo()->get_arrmeta();
    char *buf_data = buf.get_readwrite_originptr();
    buf_tp.extended()->arrmeta_reset_buffers(buf.get_ndo()->get_arrmeta());
    strided_dim_type_arrmeta *am =
        reinterpret_cast<strided_dim_type_arrmeta *>(buf_arrmeta);
    if (flags & type_flag_destructor) {
      buf_tp.extended()->data_destruct(buf_arrmeta, buf_data);
    }
    memset(buf_data, 0, am->dim_size * am->stride);
  }
}
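A brief sketch of the reuse pattern the comment above describes. It is hypothetical: the surrounding loop and the chunk-processing step are placeholders.

// Assumes 'buf' was default-initialized by nd::empty with a "strided * T"
// type, e.g. nd::array buf = nd::empty(ndt::type("16 * float64"));
inline void process_chunks(const nd::array &buf, int num_chunks)
{
  for (int i = 0; i < num_chunks; ++i) {
    // ... fill 'buf' with one chunk of data and consume it here ...
    // Zero the data and reset any blockref/zero-init buffers in the
    // arrmeta so 'buf' can be reused for the next chunk.
    reset_strided_buffer_array(buf);
  }
}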
Example #23
void dynd::parse_json(nd::array &out, const char *json_begin, const char *json_end, const eval::eval_context *ectx)
{
  try
  {
    const char *begin = json_begin, *end = json_end;
    ndt::type tp = out.get_type();
    ::parse_json(tp, out.get()->metadata(), out.data(), begin, end, ectx);
    begin = skip_whitespace(begin, end);
    if (begin != end) {
      throw json_parse_error(begin, "unexpected trailing JSON text", tp);
    }
  }
  catch (const json_parse_error &e)
  {
    stringstream ss;
    std::string line_prev, line_cur;
    int line, column;
    get_error_line_column(json_begin, json_end, e.get_position(), line_prev, line_cur, line, column);
    ss << "Error parsing JSON at line " << line << ", column " << column << "\n";
    ss << "DyND Type: " << e.get_type() << "\n";
    ss << "Message: " << e.what() << "\n";
    print_json_parse_error_marker(ss, line_prev, line_cur, line, column);
    throw invalid_argument(ss.str());
  }
  catch (const parse::parse_error &e)
  {
    stringstream ss;
    std::string line_prev, line_cur;
    int line, column;
    get_error_line_column(json_begin, json_end, e.get_position(), line_prev, line_cur, line, column);
    ss << "Error parsing JSON at line " << line << ", column " << column << "\n";
    ss << "Message: " << e.what() << "\n";
    print_json_parse_error_marker(ss, line_prev, line_cur, line, column);
    throw invalid_argument(ss.str());
  }
}
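A hedged usage sketch for this entry point. The header name dynd/json_parser.hpp is an assumption; the use of eval::default_eval_context is drawn from another snippet on this page.

#include <cstring>
#include <dynd/array.hpp>
#include <dynd/json_parser.hpp>

using namespace dynd;

void parse_json_usage_sketch()
{
  const char *text = "[1, 2, 3]";
  // Assumption: the output array is pre-allocated with the target type.
  nd::array out = nd::empty(ndt::type("3 * int32"));
  // Parse the UTF-8 buffer into 'out'; trailing non-whitespace text or a
  // malformed document raises the errors shown above.
  parse_json(out, text, text + std::strlen(text), &eval::default_eval_context);
}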
Example #24
ndt::categorical_type::categorical_type(const nd::array &categories, bool presorted)
    : base_type(categorical_id, 4, 4, type_flag_none, 0, 0, 0)
{
  intptr_t category_count;
  if (presorted) {
    // This is a construction shortcut for the case when the categories are
    // already sorted. No validation is done; the caller should have ensured it
    // was correct already, typically by construction.
    m_categories = categories.eval_immutable();
    m_category_tp = m_categories.get_type().at(0);

    category_count = categories.get_dim_size();
    m_value_to_category_index = nd::range(category_count);
    m_value_to_category_index.flag_as_immutable();
    m_category_index_to_value = m_value_to_category_index;
  }
  else {
    // Process the categories array to make sure it's valid
    const type &cdt = categories.get_type();
    if (cdt.get_id() != fixed_dim_id) {
      throw dynd::type_error("categorical_type only supports construction from "
                             "a fixed-dim array of categories");
    }
    m_category_tp = categories.get_type().at(0);
    if (!m_category_tp.is_scalar()) {
      throw dynd::type_error("categorical_type only supports construction from "
                             "a 1-dimensional strided array of categories");
    }

    category_count = categories.get_dim_size();
    intptr_t categories_stride = reinterpret_cast<const fixed_dim_type_arrmeta *>(categories.get()->metadata())->stride;

    const char *categories_element_arrmeta = categories.get()->metadata() + sizeof(fixed_dim_type_arrmeta);
    nd::kernel_builder k;
    kernel_single_t fn = k.get()->get_function<kernel_single_t>();

    cmp less(fn, k.get());
    set<const char *, cmp> uniques(less);

    m_value_to_category_index = nd::empty(category_count, make_type<intptr_t>());
    m_category_index_to_value = nd::empty(category_count, make_type<intptr_t>());

    // create the mapping from indices of (to be lexicographically sorted)
    // categories to values
    for (size_t i = 0; i != (size_t)category_count; ++i) {
      unchecked_fixed_dim_get_rw<intptr_t>(m_category_index_to_value, i) = i;
      const char *category_value = categories.cdata() + i * categories_stride;

      if (uniques.find(category_value) == uniques.end()) {
        uniques.insert(category_value);
      }
      else {
        stringstream ss;
        ss << "categories must be unique: category value ";
        m_category_tp.print_data(ss, categories_element_arrmeta, category_value);
        ss << " appears more than once";
        throw std::runtime_error(ss.str());
      }
    }
    // TODO: Putting everything in a set already caused a sort operation to
    //       occur; there's no reason we should need a second sort.
    std::sort(&unchecked_fixed_dim_get_rw<intptr_t>(m_category_index_to_value, 0),
              &unchecked_fixed_dim_get_rw<intptr_t>(m_category_index_to_value, category_count),
              sorter(categories.cdata(), categories_stride, fn, k.get()));

    // invert the m_category_index_to_value permutation
    for (intptr_t i = 0; i < category_count; ++i) {
      unchecked_fixed_dim_get_rw<intptr_t>(m_value_to_category_index,
                                           unchecked_fixed_dim_get<intptr_t>(m_category_index_to_value, i)) = i;
    }

    m_categories = make_sorted_categories(uniques, m_category_tp, categories_element_arrmeta);
  }

  // Use the number of categories to set which underlying integer storage to use
  if (category_count <= 256) {
    m_storage_type = make_type<uint8_t>();
  }
  else if (category_count <= 65536) {
    m_storage_type = make_type<uint16_t>();
  }
  else {
    m_storage_type = make_type<uint32_t>();
  }
  this->data_size = m_storage_type.get_data_size();
  this->data_alignment = (uint8_t)m_storage_type.get_data_alignment();
}
Example #25
/**
 * Adds a ckernel layer for processing one dimension of the reduction.
 * This is for a strided dimension which is being broadcast, and is
 * the final dimension before the accumulation operation.
 */
static size_t make_strided_inner_broadcast_dimension_kernel(
    const callable_type_data *elwise_reduction_const,
    const ndt::callable_type *elwise_reduction_tp,
    const callable_type_data *dst_initialization_const,
    const ndt::callable_type *dst_initialization_tp, void *ckb,
    intptr_t ckb_offset, intptr_t dst_stride, intptr_t src_stride,
    intptr_t src_size, const ndt::type &dst_tp, const char *dst_arrmeta,
    const ndt::type &src_tp, const char *src_arrmeta, bool right_associative,
    const nd::array &reduction_identity, kernel_request_t kernreq,
    const eval::eval_context *ectx)
{
  callable_type_data *elwise_reduction =
      const_cast<callable_type_data *>(elwise_reduction_const);
  callable_type_data *dst_initialization =
      const_cast<callable_type_data *>(dst_initialization_const);

  intptr_t root_ckb_offset = ckb_offset;
  strided_inner_broadcast_kernel_extra *e =
      reinterpret_cast<ckernel_builder<kernel_request_host> *>(ckb)
          ->alloc_ck<strided_inner_broadcast_kernel_extra>(ckb_offset);
  e->destructor = &strided_inner_broadcast_kernel_extra::destruct;
  // Cannot have both a dst_initialization kernel and a reduction identity
  if (dst_initialization != NULL && !reduction_identity.is_null()) {
    throw invalid_argument(
        "make_lifted_reduction_ckernel: cannot specify"
        " both a dst_initialization kernel and a reduction_identity");
  }
  if (reduction_identity.is_null()) {
    // Get the function pointer for the first_call, for the case with
    // no reduction identity
    if (kernreq == kernel_request_single) {
      e->set_first_call_function(
          &strided_inner_broadcast_kernel_extra::single_first);
    } else if (kernreq == kernel_request_strided) {
      e->set_first_call_function(
          &strided_inner_broadcast_kernel_extra::strided_first);
    } else {
      stringstream ss;
      ss << "make_lifted_reduction_ckernel: unrecognized request "
         << (int)kernreq;
      throw runtime_error(ss.str());
    }
  } else {
    // Get the function pointer for the first_call, for the case with
    // a reduction identity
    if (kernreq == kernel_request_single) {
      e->set_first_call_function(
          &strided_inner_broadcast_kernel_extra::single_first_with_ident);
    } else if (kernreq == kernel_request_strided) {
      e->set_first_call_function(
          &strided_inner_broadcast_kernel_extra::strided_first_with_ident);
    } else {
      stringstream ss;
      ss << "make_lifted_reduction_ckernel: unrecognized request "
         << (int)kernreq;
      throw runtime_error(ss.str());
    }
    if (reduction_identity.get_type() != dst_tp) {
      stringstream ss;
      ss << "make_lifted_reduction_ckernel: reduction identity type ";
      ss << reduction_identity.get_type() << " does not match dst type ";
      ss << dst_tp;
      throw runtime_error(ss.str());
    }
    e->ident_data = reduction_identity.get_readonly_originptr();
    e->ident_ref = reduction_identity.get_memblock().release();
  }
  // The function pointer for followup accumulation calls
  e->set_followup_call_function(
      &strided_inner_broadcast_kernel_extra::strided_followup);
  // The striding parameters
  e->dst_stride = dst_stride;
  e->src_stride = src_stride;
  e->size = src_size;
  // Validate that the provided callables are unary operations,
  // and have the correct types
  if (elwise_reduction_tp->get_npos() != 1 &&
      elwise_reduction_tp->get_npos() != 2) {
    stringstream ss;
    ss << "make_lifted_reduction_ckernel: elwise reduction ckernel ";
    ss << "funcproto must be unary or a binary expr with all equal types";
    throw runtime_error(ss.str());
  }
  if (elwise_reduction_tp->get_return_type() != dst_tp) {
    stringstream ss;
    ss << "make_lifted_reduction_ckernel: elwise reduction ckernel ";
    ss << "dst type is " << elwise_reduction_tp->get_return_type();
    ss << ", expected " << dst_tp;
    throw type_error(ss.str());
  }
  if (elwise_reduction_tp->get_pos_type(0) != src_tp) {
    stringstream ss;
    ss << "make_lifted_reduction_ckernel: elwise reduction ckernel ";
    ss << "src type is " << elwise_reduction_tp->get_return_type();
    ss << ", expected " << src_tp;
    throw type_error(ss.str());
  }
  if (dst_initialization != NULL) {
    check_dst_initialization(dst_initialization_tp, dst_tp, src_tp);
  }
  if (elwise_reduction_tp->get_npos() == 2) {
    ckb_offset = kernels::wrap_binary_as_unary_reduction_ckernel(
        ckb, ckb_offset, right_associative, kernel_request_strided);
    ndt::type src_tp_doubled[2] = {src_tp, src_tp};
    const char *src_arrmeta_doubled[2] = {src_arrmeta, src_arrmeta};
    ckb_offset = elwise_reduction->instantiate(
        elwise_reduction->static_data, 0, NULL, ckb, ckb_offset, dst_tp,
        dst_arrmeta, elwise_reduction_tp->get_npos(), src_tp_doubled,
        src_arrmeta_doubled, kernel_request_strided, ectx, nd::array(),
        std::map<nd::string, ndt::type>());
  } else {
    ckb_offset = elwise_reduction->instantiate(
        elwise_reduction->static_data, 0, NULL, ckb, ckb_offset, dst_tp,
        dst_arrmeta, elwise_reduction_tp->get_npos(), &src_tp, &src_arrmeta,
        kernel_request_strided, ectx, nd::array(),
        std::map<nd::string, ndt::type>());
  }
  // Make sure there's capacity for the next ckernel
  reinterpret_cast<ckernel_builder<kernel_request_host> *>(ckb)
      ->reserve(ckb_offset + sizeof(ckernel_prefix));
  // Need to retrieve 'e' again because it may have moved
  e = reinterpret_cast<ckernel_builder<kernel_request_host> *>(ckb)
          ->get_at<strided_inner_broadcast_kernel_extra>(root_ckb_offset);
  e->dst_init_kernel_offset = ckb_offset - root_ckb_offset;
  if (dst_initialization != NULL) {
    ckb_offset = dst_initialization->instantiate(
        dst_initialization->static_data, 0, NULL, ckb, ckb_offset, dst_tp,
        dst_arrmeta, elwise_reduction_tp->get_npos(), &src_tp, &src_arrmeta,
        kernel_request_strided, ectx, nd::array(),
        std::map<nd::string, ndt::type>());
  } else if (reduction_identity.is_null()) {
    ckb_offset =
        make_assignment_kernel(ckb, ckb_offset, dst_tp, dst_arrmeta, src_tp,
                               src_arrmeta, kernel_request_strided, ectx);
  } else {
    ckb_offset = make_assignment_kernel(
        ckb, ckb_offset, dst_tp, dst_arrmeta, reduction_identity.get_type(),
        reduction_identity.get_arrmeta(), kernel_request_strided, ectx);
  }

  return ckb_offset;
}
Example #26
categorical_type::categorical_type(const nd::array& categories, bool presorted)
    : base_type(categorical_type_id, custom_kind, 4, 4, type_flag_scalar, 0, 0, 0)
{
    intptr_t category_count;
    if (presorted) {
        // This is a construction shortcut for the case when the categories are already
        // sorted. No validation of this is done, the caller should have ensured it
        // was correct already, typically by construction.
        m_categories = categories.eval_immutable();
        m_category_tp = m_categories.get_type().at(0);

        category_count = categories.get_dim_size();
        m_value_to_category_index.resize(category_count);
        m_category_index_to_value.resize(category_count);
        for (size_t i = 0; i != (size_t)category_count; ++i) {
            m_value_to_category_index[i] = i;
            m_category_index_to_value[i] = i;
        }

    } else {
        // Process the categories array to make sure it's valid
        const ndt::type& cdt = categories.get_type();
        if (cdt.get_type_id() != strided_dim_type_id) {
            throw dynd::type_error("categorical_type only supports construction from a strided array of categories");
        }
        m_category_tp = categories.get_type().at(0);
        if (!m_category_tp.is_scalar()) {
            throw dynd::type_error("categorical_type only supports construction from a 1-dimensional strided array of categories");
        }

        category_count = categories.get_dim_size();
        intptr_t categories_stride = reinterpret_cast<const strided_dim_type_arrmeta *>(categories.get_arrmeta())->stride;

        const char *categories_element_arrmeta = categories.get_arrmeta() + sizeof(strided_dim_type_arrmeta);
        comparison_ckernel_builder k;
        ::make_comparison_kernel(&k, 0,
                        m_category_tp, categories_element_arrmeta,
                        m_category_tp, categories_element_arrmeta,
                        comparison_type_sorting_less, &eval::default_eval_context);

        cmp less(k.get_function(), k.get());
        set<const char *, cmp> uniques(less);

        m_value_to_category_index.resize(category_count);
        m_category_index_to_value.resize(category_count);

        // create the mapping from indices of (to be lexicographically sorted) categories to values
        for (size_t i = 0; i != (size_t)category_count; ++i) {
            m_category_index_to_value[i] = i;
            const char *category_value = categories.get_readonly_originptr() +
                            i * categories_stride;

            if (uniques.find(category_value) == uniques.end()) {
                uniques.insert(category_value);
            } else {
                stringstream ss;
                ss << "categories must be unique: category value ";
                m_category_tp.print_data(ss, categories_element_arrmeta, category_value);
                ss << " appears more than once";
                throw std::runtime_error(ss.str());
            }
        }
        // TODO: Putting everything in a set already caused a sort operation to occur,
        //       there's no reason we should need a second sort.
        std::sort(m_category_index_to_value.begin(), m_category_index_to_value.end(),
                        sorter(categories.get_readonly_originptr(), categories_stride,
                            k.get_function(), k.get()));

        // invert the m_category_index_to_value permutation
        for (uint32_t i = 0; i < m_category_index_to_value.size(); ++i) {
            m_value_to_category_index[m_category_index_to_value[i]] = i;
        }

        m_categories = make_sorted_categories(uniques, m_category_tp,
                        categories_element_arrmeta);
    }

    // Use the number of categories to set which underlying integer storage to use
    if (category_count <= 256) {
        m_storage_type = ndt::make_type<uint8_t>();
    } else if (category_count <= 65536) {
        m_storage_type = ndt::make_type<uint16_t>();
    } else {
        m_storage_type = ndt::make_type<uint32_t>();
    }
    m_members.data_size = m_storage_type.get_data_size();
    m_members.data_alignment = (uint8_t)m_storage_type.get_data_alignment();
}
Example #27
dynd::nd::array pydynd::nd_fields(const nd::array &n, PyObject *field_list)
{
  vector<std::string> selected_fields;
  pyobject_as_vector_string(field_list, selected_fields);

  // TODO: Move this implementation into dynd
  ndt::type fdt = n.get_dtype();
  if (fdt.get_kind() != struct_kind) {
    stringstream ss;
    ss << "nd.fields must be given a dynd array of 'struct' kind, not ";
    ss << fdt;
    throw runtime_error(ss.str());
  }
  const ndt::struct_type *bsd = fdt.extended<ndt::struct_type>();

  if (selected_fields.empty()) {
    throw runtime_error(
        "nd.fields requires at least one field name to be specified");
  }
  // Construct the field mapping and output field types
  vector<intptr_t> selected_index(selected_fields.size());
  vector<ndt::type> selected__types(selected_fields.size());
  for (size_t i = 0; i != selected_fields.size(); ++i) {
    selected_index[i] = bsd->get_field_index(selected_fields[i]);
    if (selected_index[i] < 0) {
      stringstream ss;
      ss << "field name ";
      print_escaped_utf8_string(ss, selected_fields[i]);
      ss << " does not exist in dynd type " << fdt;
      throw runtime_error(ss.str());
    }
    selected__types[i] = bsd->get_field_type(selected_index[i]);
  }
  // Create the result udt
  ndt::type rudt = ndt::struct_type::make(selected_fields, selected__types);
  ndt::type result_tp = n.get_type().with_replaced_dtype(rudt);
  const ndt::struct_type *rudt_bsd = rudt.extended<ndt::struct_type>();

  // Allocate the new memory block.
  size_t arrmeta_size = result_tp.get_arrmeta_size();
  nd::array result(reinterpret_cast<array_preamble *>(
                       make_array_memory_block(arrmeta_size).get()),
                   true);

  // Clone the data pointer
  result.get()->data = n.get()->data;
  result.get()->owner = n.get()->owner;
  if (!result.get()->owner) {
    result.get()->owner = n.get();
  }

  // Copy the flags
  result.get()->flags = n.get()->flags;

  // Set the type and transform the arrmeta
  result.get()->tp = result_tp;
  // First copy all the array data type arrmeta
  ndt::type tmp_dt = result_tp;
  char *dst_arrmeta = result.get()->metadata();
  const char *src_arrmeta = n.get()->metadata();
  while (tmp_dt.get_ndim() > 0) {
    if (tmp_dt.get_kind() != dim_kind) {
      throw runtime_error(
          "nd.fields doesn't support dimensions with pointers yet");
    }
    const ndt::base_dim_type *budd = tmp_dt.extended<ndt::base_dim_type>();
    size_t offset = budd->arrmeta_copy_construct_onedim(
        dst_arrmeta, src_arrmeta,
        intrusive_ptr<memory_block_data>(n.get(), true));
    dst_arrmeta += offset;
    src_arrmeta += offset;
    tmp_dt = budd->get_element_type();
  }
  // Then create the arrmeta for the new struct
  const size_t *arrmeta_offsets = bsd->get_arrmeta_offsets_raw();
  const size_t *result_arrmeta_offsets = rudt_bsd->get_arrmeta_offsets_raw();
  const size_t *data_offsets = bsd->get_data_offsets(src_arrmeta);
  size_t *result_data_offsets = reinterpret_cast<size_t *>(dst_arrmeta);
  for (size_t i = 0; i != selected_fields.size(); ++i) {
    const ndt::type &dt = selected__types[i];
    // Copy the data offset
    result_data_offsets[i] = data_offsets[selected_index[i]];
    // Copy the arrmeta for this field
    if (dt.get_arrmeta_size() > 0) {
      dt.extended()->arrmeta_copy_construct(
          dst_arrmeta + result_arrmeta_offsets[i],
          src_arrmeta + arrmeta_offsets[selected_index[i]],
          intrusive_ptr<memory_block_data>(n.get(), true));
    }
  }

  return result;
}
Example #28
void dynd::lift_reduction_arrfunc(arrfunc_type_data *out_ar,
                const nd::arrfunc& elwise_reduction_arr,
                const ndt::type& lifted_arr_type,
                const nd::arrfunc& dst_initialization_arr,
                bool keepdims,
                intptr_t reduction_ndim,
                const bool *reduction_dimflags,
                bool associative,
                bool commutative,
                bool right_associative,
                const nd::array& reduction_identity)
{
    // Validate the input elwise_reduction arrfunc
    if (elwise_reduction_arr.is_null()) {
        throw runtime_error("lift_reduction_arrfunc: 'elwise_reduction' may not be empty");
    }
    const arrfunc_type_data *elwise_reduction = elwise_reduction_arr.get();
    if (elwise_reduction->get_param_count() != 1 &&
            !(elwise_reduction->get_param_count() == 2 &&
              elwise_reduction->get_param_type(0) ==
                  elwise_reduction->get_param_type(1) &&
              elwise_reduction->get_param_type(0) ==
                  elwise_reduction->get_return_type())) {
        stringstream ss;
        ss << "lift_reduction_arrfunc: 'elwise_reduction' must contain a"
              " unary operation ckernel or a binary expr ckernel with all "
              "equal types, its prototype is " << elwise_reduction->func_proto;
        throw invalid_argument(ss.str());
    }

    lifted_reduction_arrfunc_data *self = new lifted_reduction_arrfunc_data;
    *out_ar->get_data_as<lifted_reduction_arrfunc_data *>() = self;
    out_ar->free_func = &delete_lifted_reduction_arrfunc_data;
    self->child_elwise_reduction = elwise_reduction_arr;
    self->child_dst_initialization = dst_initialization_arr;
    if (!reduction_identity.is_null()) {
        if (reduction_identity.is_immutable() &&
                reduction_identity.get_type() == elwise_reduction->get_return_type()) {
            self->reduction_identity = reduction_identity;
        } else {
            self->reduction_identity = nd::empty(elwise_reduction->get_return_type());
            self->reduction_identity.vals() = reduction_identity;
            self->reduction_identity.flag_as_immutable();
        }
    }

    // Figure out the result type
    ndt::type lifted_dst_type = elwise_reduction->get_return_type();
    for (intptr_t i = reduction_ndim - 1; i >= 0; --i) {
        if (reduction_dimflags[i]) {
            if (keepdims) {
                lifted_dst_type = ndt::make_strided_dim(lifted_dst_type);
            }
        } else {
            ndt::type subtype = lifted_arr_type.get_type_at_dimension(NULL, i);
            switch (subtype.get_type_id()) {
                case strided_dim_type_id:
                case cfixed_dim_type_id:
                    lifted_dst_type = ndt::make_strided_dim(lifted_dst_type);
                    break;
                case var_dim_type_id:
                    lifted_dst_type = ndt::make_var_dim(lifted_dst_type);
                    break;
                default: {
                    stringstream ss;
                    ss << "lift_reduction_arrfunc: don't know how to process ";
                    ss << "dimension of type " << subtype;
                    throw type_error(ss.str());
                }
            }
        }
    }
    self->data_types[0] = lifted_dst_type;
    self->data_types[1] = lifted_arr_type;
    self->reduction_ndim = reduction_ndim;
    self->associative = associative;
    self->commutative = commutative;
    self->right_associative = right_associative;
    self->reduction_dimflags.init(reduction_ndim);
    memcpy(self->reduction_dimflags.get(), reduction_dimflags, sizeof(bool) * reduction_ndim);

    out_ar->instantiate = &instantiate_lifted_reduction_arrfunc_data;
    out_ar->func_proto = ndt::make_funcproto(lifted_arr_type, lifted_dst_type);
}
Example #29
static size_t array_param(const nd::array& n) {
    return n.get_type().get_ndim();
}
Example #30
 array_iter(const nd::array& op0) {
     init(op0.get_type(), op0.get_ndo_meta(), op0.get_readwrite_originptr());
 }