Example #1
0
Tensor addmm(const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
  // See Note [Multiple dispatch to sparse]
  // Dense mat1 takes the legacy TH kernel; a sparse mat1 must go through the
  // sparse-aware native implementation instead.
  if (!mat1.is_sparse()) {
    return legacy::th::_th_addmm(self, mat1, mat2, beta, alpha);
  }
  // Broadcast self up to the result shape {mat1.rows, mat2.cols} before the
  // sparse dispatch.
  Tensor expanded_self;
  std::tie(expanded_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
  return s_native_addmm(expanded_self, mat1, mat2, beta, alpha);
}
Example #2
0
Tensor addmm(const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
  // See Note [CPU sparse is globally native] and Note [Multiple dispatch to sparse]
  // Only the CPU path with a sparse mat1 has a native sparse kernel; every
  // other combination (CUDA, or a dense mat1) falls back to the TH kernel.
  if (!self.is_cuda() && mat1.is_sparse()) {
    // Broadcast self up to the result shape {mat1.rows, mat2.cols} before the
    // sparse dispatch.
    Tensor expanded_self;
    std::tie(expanded_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
    return s_native_addmm(expanded_self, mat1, mat2, beta, alpha);
  }
  return th_addmm(self, mat1, mat2, beta, alpha);
}