// In-place multiply: broadcast `other` against `self`, then dispatch to the
// native kernel when `self` has a native implementation, otherwise fall back
// to the TH kernel. The same broadcast-then-dispatch pattern is used by the
// other in-place wrappers below.
Tensor& mul_(Tensor& self, const Tensor& other) {
  if (_has_native(self)) {
    Tensor b_other;
    std::tie(b_other) = expand_inplace(self, other, "mul_");
    return s_native_mul_(self, b_other);
  } else {
    return th_mul_(self, other);
  }
}
Tensor& sub_(Tensor& self, const Tensor& other, Scalar alpha) {
  if (_has_native(self)) {
    Tensor b_other;
    std::tie(b_other) = expand_inplace(self, other, "sub_");
    return s_native_sub_(self, b_other, alpha);
  } else {
    return th_sub_(self, other, alpha);
  }
}
Tensor& index_put_(Tensor& self, TensorList indices, const Tensor& value) {
  if (indices.size() > (size_t)self.dim()) {
    AT_ERROR("too many indices for tensor of dimension ", self.dim(),
             " (got ", indices.size(), ")");
  }
  // Collapse the indices into a single linear index, broadcast the value to
  // match it, and scatter the expanded value into the source tensor.
  Tensor src, linearIndex, expandedValue;
  std::tie(src, linearIndex) = makeLinearIndex(self, indices);
  std::tie(expandedValue) = expand_inplace(linearIndex, value);
  return src.put_(linearIndex, expandedValue);
}
Tensor& add_(Tensor& self, const Tensor& other, Scalar alpha) {
  if (!self.is_cuda()) {
    // See Note [CPU sparse is globally native] and Note [Multiple dispatch to sparse]
    auto self_sparse = self.is_sparse();
    auto other_sparse = other.is_sparse();
    if (self_sparse && other_sparse) {
      Tensor b_other;
      std::tie(b_other) = expand_inplace(self, other, "add_");
      return s_native_add_(self, b_other, alpha);
    } else if (!self_sparse && other_sparse) {
      return native_add_(self, SparseTensorRef(other), alpha);
    } else {
      return th_add_(self, other, alpha);
    }
  } else {
    return th_add_(self, other, alpha);
  }
}
Tensor& Type::copy_(Tensor& self, const Tensor& src, bool non_blocking) const {
  Tensor b_src;
  std::tie(b_src) = expand_inplace(self, src, "copy");
  return s_copy_(self, b_src, non_blocking);
}
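
// Usage sketch (illustrative only, not part of this file's API): shows the
// caller-visible effect of the broadcasting performed by the in-place
// wrappers above. Assumes <ATen/ATen.h> is already included by this
// translation unit; the shapes and the function name are assumptions chosen
// for illustration.
static void example_inplace_broadcast_usage() {
  at::Tensor a = at::ones({2, 3});
  at::Tensor b = at::ones({3});
  a.add_(b, /*alpha=*/2);  // b is broadcast from {3} to {2, 3}; a becomes a + 2 * b
  a.mul_(b);               // same broadcast, element-wise in-place multiply
}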