static void py_bind_tensor_types(const std::vector<PyTensorType>& tensor_types) { auto torch_module = THPObjectPtr(PyImport_ImportModule("torch")); if (!torch_module) throw python_error(); auto tensor_classes = THPObjectPtr(PyObject_GetAttrString(torch_module.get(), "_tensor_classes")); if (!tensor_classes) throw python_error(); for (auto& tensor_type : tensor_types) { auto name = std::string(tensor_type.name); auto idx = name.rfind("."); auto type_name = name.substr(idx + 1); auto module_name = name.substr(0, idx); auto module_obj = THPObjectPtr(PyImport_ImportModule(module_name.c_str())); if (!module_obj) throw python_error(); PyObject* type_obj = (PyObject*)&tensor_type; Py_INCREF(type_obj); if (PyModule_AddObject(module_obj.get(), type_name.c_str(), type_obj) < 0) { throw python_error(); } if (PySet_Add(tensor_classes.get(), type_obj) < 0) { throw python_error(); } } }
// Wraps an ATen Storage in a freshly allocated Python storage object of the
// matching Python type, returning a new reference.
PyObject* createPyObject(const at::Storage& storage) {
  auto py_type = getPyTypeObject(storage);
  THPObjectPtr wrapper(py_type->tp_alloc(py_type, 0));
  if (!wrapper) {
    throw python_error();
  }
  // unsafeGetTH(true) — the `true` presumably retains the TH storage so the
  // Python wrapper owns its own reference; confirm against the ATen API.
  auto* self = (THPVoidStorage*)wrapper.get();
  self->cdata = (THVoidStorage *)storage.unsafeGetTH(true);
  return wrapper.release();
}
// Resolves the Python storage class (e.g. torch.FloatStorage) corresponding
// to the given ATen type. Throws TypeError if the attribute is missing.
static THPObjectPtr get_storage_obj(const Type& type) {
  auto module_name = get_module(type.backend());
  THPObjectPtr module_obj(PyImport_ImportModule(module_name));
  if (!module_obj) {
    throw python_error();
  }
  // Storage class names follow the "<ScalarType>Storage" convention.
  std::string storage_name = std::string(at::toString(type.scalarType())) + "Storage";
  THPObjectPtr storage_obj(PyObject_GetAttrString(module_obj.get(), storage_name.c_str()));
  if (!storage_obj.get()) {
    throw TypeError("couldn't find storage object %s", storage_name.c_str());
  }
  return storage_obj;
}
// Builds a new dict holding the combined attributes of torch.Tensor and its
// immediate base type (entries already present are NOT overridden, so
// Tensor's own attributes win over the base's).
static THPObjectPtr get_tensor_dict() {
  THPObjectPtr torch(PyImport_ImportModule("torch"));
  if (!torch) throw python_error();

  THPObjectPtr tensor_class(PyObject_GetAttrString(torch, "Tensor"));
  if (!tensor_class) throw python_error();

  auto tensor_type = (PyTypeObject*)tensor_class.get();
  TORCH_ASSERTM(tensor_type->tp_base, "missing base type for Tensor");

  THPObjectPtr combined(PyDict_New());
  if (!combined) throw python_error();

  // Merge with override=0: keys already in `combined` are left untouched.
  auto merge_into = [&combined](PyObject* src) {
    if (PyDict_Merge(combined.get(), src, 0) < 0) {
      throw python_error();
    }
  };
  merge_into(tensor_type->tp_dict);
  merge_into(tensor_type->tp_base->tp_dict);

  return combined;
}
// Copies all PythonOp state from `other_` into this node, taking fresh
// strong references to the shared Python callable and its scalar arguments.
void PythonOp::cloneFrom(Node * other_) {
  // Generic Node state first, then the PythonOp-specific fields.
  Node::cloneFrom(other_);
  auto src = other_->cast<PythonOp>();
  this->cconv = src->cconv;
  this->is_legacy = src->is_legacy;
  this->var_flags = src->var_flags;
  // Share the Python object: bump its refcount before wrapping it, since
  // THPObjectPtr takes ownership of the reference it is handed.
  Py_INCREF(src->pyobj.get());
  this->pyobj = THPObjectPtr(src->pyobj.get());
  for (auto& arg : src->scalar_args) {
    Py_INCREF(arg.get());
    this->scalar_args.emplace_back(arg.get());
  }
}
// Infers per-dimension sizes of a nested Python sequence by walking the
// first element at every nesting level. Descent stops at the first empty
// sequence or non-sequence element. Throws ValueError past MAX_DIMS.
static std::vector<int64_t> compute_sizes(PyObject* seq) {
  std::vector<int64_t> sizes;
  // Owns the most recently fetched sub-sequence so `seq` stays alive while
  // we inspect it on the next iteration.
  THPObjectPtr handle;
  do {
    auto length = PySequence_Length(seq);
    if (length < 0) throw python_error();
    sizes.push_back(length);
    if (sizes.size() > MAX_DIMS) {
      throw ValueError("too many dimensions '%s'", Py_TYPE(seq)->tp_name);
    }
    if (length == 0) break;
    handle = THPObjectPtr(PySequence_GetItem(seq, 0));
    // Fix: PySequence_GetItem can fail (e.g. a broken __getitem__). The
    // original code passed the resulting nullptr to PySequence_Check with a
    // pending Python error; surface the error instead.
    if (!handle) throw python_error();
    seq = handle.get();
  } while (PySequence_Check(seq));
  return sizes;
}
// Installs `type` as the process-wide default tensor type and rebinds the
// torch.Storage attribute to the matching Python storage class. Rejects
// non-floating-point, non-variable, and sparse types.
void set_default_tensor_type(const at::Type& type) {
  if (!at::isFloatingType(type.scalarType())) {
    throw TypeError("only floating-point types are supported as the default type");
  }
  if (!type.is_variable_or_undefined()) {
    throw TypeError("only variable types are supported");
  }
  if (type.is_sparse()) {
    throw TypeError("only dense types are supported as the default type");
  }

  // Resolve the storage class up front: if it does not exist we throw
  // before mutating the global default.
  THPObjectPtr storage = get_storage_obj(type);

  default_tensor_type = const_cast<Type*>(&type);

  THPObjectPtr torch_module(PyImport_ImportModule("torch"));
  if (!torch_module) throw python_error();
  if (PyObject_SetAttrString(torch_module.get(), "Storage", storage) != 0) {
    // technically, we should undo the change of default tensor type.
    throw python_error();
  }
}
static void recursive_store(char* data, IntList sizes, IntList strides, int64_t dim, ScalarType scalarType, int elementSize, PyObject* obj) { int64_t ndim = sizes.size(); if (dim == ndim) { torch::utils::store_scalar(data, scalarType, obj); return; } auto n = sizes[dim]; auto seq = THPObjectPtr(PySequence_Fast(obj, "not a sequence")); if (!seq) throw python_error(); auto seq_size = PySequence_Fast_GET_SIZE(seq.get()); if (seq_size != n) { throw ValueError("expected sequence of length %lld at dim %lld (got %lld)", (long long)n, (long long)dim, (long long)seq_size); } PyObject** items = PySequence_Fast_ITEMS(seq.get()); for (int64_t i = 0; i < n; i++) { recursive_store(data, sizes, strides, dim + 1, scalarType, elementSize, items[i]); data += strides[dim] * elementSize; } }