Tensor& addmm_(Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
  // See Note [Multiple dispatch to sparse]
  auto mat1_sparse = mat1.is_sparse();
  if (mat1_sparse) {
    // inplace is not broadcasting
    return s_native_addmm_(self, mat1, mat2, beta, alpha);
  } else {
    return legacy::th::_th_addmm_(self, mat1, mat2, beta, alpha);
  }
}
Tensor& addmm_(Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
  if (!self.is_cuda()) {
    // See Note [CPU sparse is globally native] and Note [Multiple dispatch to sparse]
    auto mat1_sparse = mat1.is_sparse();
    if (mat1_sparse) {
      // inplace is not broadcasting
      return s_native_addmm_(self, mat1, mat2, beta, alpha);
    } else {
      return th_addmm_(self, mat1, mat2, beta, alpha);
    }
  } else {
    return th_addmm_(self, mat1, mat2, beta, alpha);
  }
}
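For context, here is a minimal sketch of how the two dispatch paths above get exercised from user code: a strided (dense) mat1 falls through to the TH kernel, while a sparse mat1 routes to the native sparse implementation. This assumes a standard libtorch setup; the kernel names in the snippets above (s_native_addmm_, th_addmm_, legacy::th::_th_addmm_) are internal and are never called directly.

#include <torch/torch.h>

int main() {
  // Illustrative sketch only; exercises the public in-place addmm_ method.
  auto self = torch::zeros({2, 2});
  auto mat2 = torch::rand({3, 2});

  // Dense case: mat1 is a strided tensor, so addmm_ takes the TH path.
  auto dense_mat1 = torch::rand({2, 3});
  self.addmm_(dense_mat1, mat2);

  // Sparse case: mat1.is_sparse() is true, so addmm_ takes the native
  // sparse path instead.
  auto indices = torch::tensor({{0, 1}, {0, 2}});  // 2 x nnz COO coordinates
  auto values = torch::tensor({1.0f, 2.0f});
  auto sparse_mat1 = torch::sparse_coo_tensor(indices, values, {2, 3});
  self.addmm_(sparse_mat1, mat2);
  return 0;
}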