Tensor tensor_new(const Type& type, PyObject* args, PyObject* kwargs) {
  static PythonArgParser parser({
    "new(*, int64_t device=-1)",
    "new(IntList size, *, int64_t device=-1)",
    "new(Storage storage)",
    "new(*, int64_t cdata)|hidden",
    "new(Tensor other)",
    "new(PyObject* data, *, int64_t device=-1)",
  });

  PyObject* parsed_args[2];
  auto r = parser.parse(args, kwargs, parsed_args);
  if (r.idx == 0) {
    // new(): empty tensor, optionally on a specific device
    AutoGPU auto_gpu(r.toInt64(0));
    return type.tensor();
  } else if (r.idx == 1) {
    PyObject* arg = parsed_args[0];
    if (!THPSize_Check(arg) && PyTuple_GET_SIZE(args) >= 1 && arg == PyTuple_GET_ITEM(args, 0)) {
      // new(sequence) binds to this signature but should be treated differently
      // unless the sequence is a torch.Size
      return new_from_sequence(type, r.toInt64(1), r.pyobject(0));
    }
    // new(sizes): uninitialized tensor with the given shape
    return new_with_sizes(type, r.toInt64(1), r.intlist(0));
  } else if (r.idx == 2) {
    // new(storage): tensor backed by an existing Storage
    return new_with_storage(type, *r.storage(0));
  } else if (r.idx == 3) {
    // new(cdata), hidden: wrap a raw TH tensor pointer
    auto cdata = reinterpret_cast<void*>(r.toInt64(0));
    return type.unsafeTensorFromTH(cdata, true);
  } else if (r.idx == 4) {
    // new(other): construct from an existing Tensor
    return new_with_tensor(type, r.tensor(0));
  } else if (r.idx == 5) {
    // new(data): build a tensor from an arbitrary Python object
    return new_from_sequence(type, r.toInt64(1), r.pyobject(0));
  }
  throw std::runtime_error("new(): invalid arguments");
}
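The subtle case is the r.idx == 1 branch: a plain Python sequence such as [2, 3] also parses as an IntList, so the code re-checks whether the parsed argument is the caller's own first positional object; if it is, and it is not an explicit torch.Size, the call is treated as new-from-data rather than new-by-shape. A standalone sketch of that predicate, with a hypothetical is_size_check standing in for THPSize_Check, might look like this:

#include <Python.h>

// Hypothetical helper isolating the disambiguation in the r.idx == 1 branch:
// returns true when the parsed IntList argument is really the caller's
// sequence object (e.g. torch.Tensor([2, 3])) rather than sizes spread
// across the positional arguments (e.g. torch.Tensor(2, 3)) or an explicit
// torch.Size.
static bool binds_as_data_sequence(PyObject* args, PyObject* first_parsed,
                                   bool (*is_size_check)(PyObject*)) {
  return !is_size_check(first_parsed) &&
         PyTuple_GET_SIZE(args) >= 1 &&
         first_parsed == PyTuple_GET_ITEM(args, 0);
}

Under that reading, torch.Tensor(2, 3) allocates an uninitialized 2x3 tensor, while torch.Tensor([2, 3]) produces a one-dimensional tensor holding the values 2 and 3.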
static Tensor new_from_sequence(const Type& type, int device, PyObject* data) {
  auto tensor = new_from_sequence(type.scalarType(), data);
  if (tensor.type() != type) {
    // Converting may copy between devices, so release the GIL and pin the
    // target device for the duration of the conversion.
    AutoNoGIL no_gil;
    AutoGPU auto_gpu(device);
    tensor = tensor.toType(type);
  }
  return tensor;
}
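The ScalarType overload of new_from_sequence called on the first line is not part of this excerpt; presumably it walks the nested Python sequence to work out a shape before copying elements in. A minimal sketch of that kind of size inference, assuming rectangular nesting and using only the CPython sequence protocol (infer_sizes is an illustrative name, not the real helper), could be:

#include <Python.h>

#include <cstdint>
#include <vector>

// Hypothetical sketch of nested-sequence size inference, assuming the
// nesting is rectangular: record the length at each level, then descend
// through the first element.
static std::vector<int64_t> infer_sizes(PyObject* data) {
  std::vector<int64_t> sizes;
  PyObject* seq = data;
  Py_INCREF(seq);
  // Strings are sequences of one-character strings; excluding them stops
  // the descent from looping forever.
  while (PySequence_Check(seq) && !PyUnicode_Check(seq)) {
    Py_ssize_t n = PySequence_Length(seq);
    if (n < 0) {
      break;  // length lookup failed; a real implementation would raise
    }
    sizes.push_back(static_cast<int64_t>(n));
    if (n == 0) {
      break;  // empty dimension: nothing further to inspect
    }
    PyObject* first = PySequence_GetItem(seq, 0);  // new reference
    Py_DECREF(seq);
    seq = first;
    if (seq == nullptr) {
      break;  // item lookup failed
    }
  }
  Py_XDECREF(seq);
  return sizes;
}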
void InputBuffer::add(size_t pos, Variable var) {
  TORCH_ASSERT(pos < buffer.size());
  if (!var.defined()) {
    return;
  }
  auto& old_var = buffer[pos];
  if (!old_var.defined()) {
    buffer[pos] = std::move(var);
  } else {
    AutoGPU auto_gpu(var);
    // ATen doesn't route sparse additions correctly...
    if (old_var.type().is_sparse()) {
      buffer[pos] = var + old_var;
    } else {
      buffer[pos] = old_var + var;
    }
  }
}
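InputBuffer collects the gradients flowing into one function's inputs during the backward pass: the first contribution to a slot is moved in without a copy, and later contributions are summed. The same accumulate-into-slot shape, reduced to toy types (a sketch with assumed Grad and BufferSketch names, not the real autograd classes), looks like:

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Toy gradient type standing in for Variable; "defined" mirrors
// Variable::defined().
struct Grad {
  double value = 0.0;
  bool defined = false;
};

static Grad operator+(const Grad& a, const Grad& b) {
  return Grad{a.value + b.value, true};
}

// Same shape as InputBuffer::add: the first write moves the gradient into
// the slot, later writes accumulate by addition.
struct BufferSketch {
  explicit BufferSketch(std::size_t size) : buffer(size) {}

  void add(std::size_t pos, Grad var) {
    assert(pos < buffer.size());
    if (!var.defined) return;
    auto& old_var = buffer[pos];
    if (!old_var.defined) {
      buffer[pos] = std::move(var);
    } else {
      buffer[pos] = old_var + var;
    }
  }

  std::vector<Grad> buffer;
};

The operand swap in the real function's sparse case follows from the comment above it: at the time, ATen evidently routed the addition correctly only with the sparse tensor in one argument position, so the existing sparse value is placed on the right-hand side.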
static Tensor new_with_sizes(const Type& type, int device, IntList sizes) {
  // Allocation can be slow, so release the GIL; AutoGPU makes sure the
  // tensor is created on the requested device.
  AutoNoGIL no_gil;
  AutoGPU auto_gpu(device);
  return type.tensor(sizes);
}
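AutoNoGIL and AutoGPU are both RAII guards: each constructor changes per-thread state (releasing the GIL, switching the active CUDA device) and the matching destructor restores it whether the function returns normally or throws. A generic sketch of the pattern, with a hypothetical thread-local device counter in place of the real CUDA calls, is:

// Hypothetical RAII device guard illustrating the AutoGPU pattern: switch
// in the constructor, restore in the destructor, exception-safe by design.
class DeviceGuardSketch {
 public:
  explicit DeviceGuardSketch(int device) : prev_(current_device_) {
    if (device >= 0) {
      current_device_ = device;  // negative device: keep the current one
    }
  }
  ~DeviceGuardSketch() { current_device_ = prev_; }

  DeviceGuardSketch(const DeviceGuardSketch&) = delete;
  DeviceGuardSketch& operator=(const DeviceGuardSketch&) = delete;

 private:
  static thread_local int current_device_;
  int prev_;
};

thread_local int DeviceGuardSketch::current_device_ = 0;

The device >= 0 check in the sketch mirrors the device=-1 defaults in the parser signatures above, where -1 appears to mean "stay on the current device".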