Example #1
// Serialize the tensor into a Thrift LuaRefObject and append it to the ref
// list; the returned LuaPrimitiveObject holds the index of the new ref.
template <class T>
LuaPrimitiveObject append(const thpp::Tensor<T>& val, LuaRefList& refs,
                          thpp::SharingMode sharing) {
  LuaRefObject ref;
  ref.__isset.tensorVal = true;
  val.serialize(ref.tensorVal, thpp::ThriftTensorEndianness::NATIVE, sharing);

  return appendRef(std::move(ref), refs);
}
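appendRef itself is not part of this snippet. Judging from the inline pattern in Example #2 below (push the ref, then point the primitive value at its index), a minimal sketch could look like the following; the exact container behavior of LuaRefList is an assumption here.

// Hypothetical sketch of appendRef, inferred from the inline pattern in
// Example #2: store the ref and return a primitive that indexes it.
// Assumes LuaRefList behaves like a std::vector<LuaRefObject>.
LuaPrimitiveObject appendRef(LuaRefObject&& ref, LuaRefList& refs) {
  LuaPrimitiveObject pobj;
  pobj.__isset.refVal = true;
  pobj.refVal = refs.size();      // index the new ref will occupy
  refs.push_back(std::move(ref));
  return pobj;
}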
Example #2
// Wrap a tensor as a standalone LuaObject: serialize it into a ref, store the
// ref at index 0, and make the primitive value point at that index.
template <class T>
LuaObject make(thpp::Tensor<T>& val) {
  LuaRefObject ref;
  ref.__isset.tensorVal = true;
  val.serialize(ref.tensorVal);

  LuaObject obj;
  obj.refs.push_back(std::move(ref));
  obj.value.__isset.refVal = true;
  obj.value.refVal = 0;  // reference to refs[0]
  return obj;
}
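A hedged usage sketch of make(): it always stores the tensor as ref index 0 and, unlike append() in Example #1, does not let the caller pick a sharing mode or endianness. The tensor construction below assumes thpp::Tensor can be sized from an initializer list and exposes fill(); the names are illustrative.

// Illustrative only: wrap a freshly created tensor for transport.
thpp::Tensor<float> t({3, 4});   // assumed: size-list construction
t.fill(1.0f);                    // assumed: fill() from thpp's tensor API
LuaObject obj = make(t);
// obj.refs[0] now holds the serialized tensor; obj.value.refVal == 0.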
Example #3
template <class T>
PyObjectHandle LuaToPythonConverter::convertTensor(lua_State* L,
                                                   thpp::Tensor<T>& tensor,
                                                   int numpyType) {
  npy_intp zero = 0;
  int ndims;
  std::unique_ptr<npy_intp[]> dims;
  npy_intp* dimsPtr;
  std::unique_ptr<npy_intp[]> strides;

  // Numpy and Torch disagree on empty tensors. In Torch, an empty tensor
  // is a tensor with zero dimensions. In Numpy, a tensor with zero dimensions
  // is a scalar (with one element). So we'll convert an empty Torch tensor
  // to a 1d Numpy tensor of shape [0]. Also see pushTensor in PythonToLua.cpp.
  if (tensor.ndims() != 0) {
    ndims = tensor.ndims();
    auto tsizes = tensor.sizes();
    DCHECK_EQ(tsizes.size(), ndims);

    dims.reset(new npy_intp[ndims]);
    dimsPtr = dims.get();
    std::copy(tsizes.begin(), tsizes.end(), dims.get());

    if (!tensor.isContiguous()) {
      auto tstrides = tensor.strides();
      DCHECK_EQ(tstrides.size(), ndims);

      strides.reset(new npy_intp[ndims]);

      // Numpy strides use bytes; Torch strides use element counts.
      for (int i = 0; i < ndims; ++i) {
        strides[i] = tstrides[i] * sizeof(T);
      }
    }
  } else {
    ndims = 1;
    dimsPtr = &zero;
  }

  PyObjectHandle obj(PyArray_New(
      &PyArray_Type, ndims, dimsPtr, numpyType,
      strides.get(), tensor.data(), 0,
      NPY_ARRAY_ALIGNED, nullptr));
  checkPythonError(obj, L, "create numpy.ndarray of type {}", numpyType);

  // Create a PythonStorage object to hold the reference count.
  // PyArray_SetBaseObject steals the reference to the base object.
  int r = PyArray_SetBaseObject(reinterpret_cast<PyArrayObject*>(obj.get()),
                                PythonStorage<T>::allocate(
                                    L, tensor.storage()).release());
  checkPythonError(r != -1, L, "SetBaseObject on numpy.ndarray");
  return obj;
}
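The numpyType argument in Example #3 is a NumPy C API type number. A small, hypothetical traits helper (not part of fblualib) shows how a caller might derive it from the element type T; the constants are standard NumPy C API enums, the helper and call site are assumptions.

// Hypothetical mapping from element type to NumPy type number.
template <class T> struct NumpyType;
template <> struct NumpyType<float>   { static constexpr int value = NPY_FLOAT;  };
template <> struct NumpyType<double>  { static constexpr int value = NPY_DOUBLE; };
template <> struct NumpyType<int32_t> { static constexpr int value = NPY_INT32;  };
template <> struct NumpyType<int64_t> { static constexpr int value = NPY_INT64;  };

// Possible call site (names assumed):
// PyObjectHandle arr = converter.convertTensor(L, tensor, NumpyType<T>::value);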