static Tensor dispatch_type(const Tensor & self, const at::Type & type, int device, bool non_blocking) {
  if (type.is_cuda()) {
    // make sure CUDA is initialized before we touch a CUDA type
    torch::utils::cuda_lazy_init();
  }
  AutoNoGIL no_gil;          // release the Python GIL around the potentially slow copy
  AutoGPU auto_gpu(device);  // switch to the target device for the scope of this call
  int64_t tensor_device = self.is_cuda() ? self.get_device() : -1;
  if (self.is_cuda() && type.is_cuda() && tensor_device != at::current_device()) {
    // copy if the devices are different even if the types are the same
    return type.copy(self, non_blocking);
  }
  return self.toType(type, non_blocking);
}
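For context, a call site might look like the following. This is a minimal sketch assuming the legacy at::Type factory functions (at::CPU / at::CUDA) from the same era of ATen; it is not code from the original file.

// Hypothetical call site (legacy ATen Type API assumed):
at::Tensor t = at::CPU(at::kFloat).ones({2, 3});   // CPU float tensor
const at::Type& cuda_float = at::CUDA(at::kFloat);
// Move t to device 1; since t is on CPU, the device-mismatch branch is
// skipped and the call falls through to toType().
at::Tensor t_cuda = dispatch_type(t, cuda_float, /*device=*/1, /*non_blocking=*/false);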
Example 2
void set_default_tensor_type(const at::Type& type) {
  // the default must be a dense, floating-point Variable type
  if (!at::isFloatingType(type.scalarType())) {
    throw TypeError("only floating-point types are supported as the default type");
  }
  if (!type.is_variable_or_undefined()) {
    throw TypeError("only variable types are supported");
  }
  if (type.is_sparse()) {
    throw TypeError("only dense types are supported as the default type");
  }

  // get the storage first, so if it doesn't exist we don't change the default tensor type
  THPObjectPtr storage = get_storage_obj(type);
  default_tensor_type = const_cast<Type*>(&type);

  auto torch_module = THPObjectPtr(PyImport_ImportModule("torch"));
  if (!torch_module) throw python_error();

  if (PyObject_SetAttrString(torch_module.get(), "Storage", storage) != 0) {
    // technically, we should undo the change of default tensor type here,
    // since the Python-side update failed
    throw python_error();
  }
}
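The import-then-setattr dance at the end is plain CPython API. Below is a minimal, self-contained sketch of the same pattern; std::runtime_error stands in for PyTorch's python_error, and rebind_torch_storage is a hypothetical name.

#include <Python.h>
#include <stdexcept>

// Hypothetical standalone version of the pattern above: import a module,
// rebind one of its attributes, and surface CPython failures as C++ exceptions.
static void rebind_torch_storage(PyObject* storage) {
  PyObject* torch_module = PyImport_ImportModule("torch");
  if (!torch_module) {
    throw std::runtime_error("failed to import torch");  // python_error in the original
  }
  int rc = PyObject_SetAttrString(torch_module, "Storage", storage);
  Py_DECREF(torch_module);  // THPObjectPtr does this automatically in the original
  if (rc != 0) {
    throw std::runtime_error("failed to set torch.Storage");
  }
}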
Example 3
at::Device::Type getDeviceType(const at::Type& type) {
  return type.is_cuda() ? at::Device::Type::CUDA : at::Device::Type::CPU;
}
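As a quick check of the mapping, a sketch assuming the legacy at::CPU / at::CUDA type factories (check_device_type_mapping is a hypothetical helper, not part of the original file):

#include <cassert>

// Minimal sketch (legacy ATen Type API assumed):
void check_device_type_mapping() {
  assert(getDeviceType(at::CPU(at::kFloat))  == at::Device::Type::CPU);
  assert(getDeviceType(at::CUDA(at::kFloat)) == at::Device::Type::CUDA);
}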