Example No. 1
Tensor mul(const Tensor& self, const Tensor& other) {
  if (_has_native(self)) {
    // Broadcast the operands, then dispatch to the native mul kernel
    Tensor b_self, b_other;
    std::tie(b_self, b_other) = expand_outplace(self, other, "mul");
    return s_native_mul(b_self, b_other);
  } else {
    return th_mul(self, other);
  }
}
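
The wrapper above broadcasts both operands before calling the native kernel, so callers may pass tensors of different (broadcastable) shapes. A minimal usage sketch, assuming an ATen build where this wrapper backs at::mul (the at::ones factory calls are standard ATen API):

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::ones({3, 1});   // shape (3, 1)
  at::Tensor b = at::ones({1, 4});   // shape (1, 4)
  // expand_outplace broadcasts both operands to (3, 4) before s_native_mul runs
  at::Tensor c = at::mul(a, b);      // result has shape (3, 4)
  return 0;
}
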
Example No. 2
Tensor sub(const Tensor& self, const Tensor& other, Scalar alpha) {
  if (_has_native(self)) {
    Tensor b_self, b_other;
    std::tie(b_self, b_other) = expand_outplace(self, other, "sub");
    return s_native_sub(b_self, b_other, alpha);
  } else {
    return th_sub(self, other, alpha);
  }
}
Example No. 3
Tensor add(const Tensor& self, const Tensor& other, Scalar alpha) {
  if (!self.is_cuda()) {
    // See Note [CPU sparse is globally native] and Note [Multiple dispatch to sparse]
    auto self_sparse = self.is_sparse();
    auto other_sparse = other.is_sparse();
    if (self_sparse && other_sparse) {
      Tensor b_self, b_other;
      std::tie(b_self, b_other) = expand_outplace(self, other, "add");
      return s_native_add(b_self, b_other, alpha);
    } else if (!self_sparse && other_sparse) {
      return native_add(self, SparseTensorRef(other), alpha);
    } else {
      return th_add(self, other, alpha);
    }
  } else {
    return th_add(self, other, alpha);
  }
}
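
On CPU this wrapper also dispatches on sparseness, so dense-plus-sparse addition goes through the native path. A rough usage sketch, assuming an ATen build that exposes at::add and Tensor::to_sparse (the shapes and values are arbitrary):

#include <ATen/ATen.h>

int main() {
  at::Tensor dense  = at::ones({2, 3});              // strided CPU tensor
  at::Tensor sparse = at::ones({2, 3}).to_sparse();  // COO sparse tensor
  // dense + sparse on CPU takes the native_add(self, SparseTensorRef(other), alpha) branch
  at::Tensor out = at::add(dense, sparse, /*alpha=*/1);
  return 0;
}
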
Example No. 4
static std::tuple<Tensor, Tensor> makeLinearIndex(Tensor self, TensorList orig) {
  checkIndexTensorTypes(orig);
  // first expand ByteTensor (boolean masks) into 1 or more LongTensors
  auto indices = expandByteTensors(self, orig);
  if (hasEmptyTensor(indices)) {
    return std::make_tuple(self, self.type().toScalarType(kLong).tensor());
  }
  // next broadcast all index tensors together
  indices = expand_outplace(indices);
  // add missing null Tensors so that it matches self.dim()
  while (indices.size() < (size_t)self.dim()) {
    indices.emplace_back();
  }
  // if the non-null indices are not all adjacent, transpose self and indices
  // together so that they're adjacent at the front
  if (!hasContiguousSubspace(indices)) {
    std::tie(self, indices) = transposeToFront(self, indices);
  }
  auto linearIndex = computeLinearIndex(self, indices);
  return std::make_tuple(self, linearIndex);
}
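
The (source, linearIndex) pair returned here is intended to be consumed by a gather over the flattened source. A hypothetical caller sketch, assuming the helpers above are in scope; the function name below is made up, while Tensor::take (which indexes into a flattened view of its receiver) is real ATen API:

// Hypothetical caller: flatten advanced indexing into a single linear index,
// then gather the selected values from the (possibly transposed) source.
static Tensor gather_with_linear_index(const Tensor& self, TensorList indices) {
  Tensor src, linearIndex;
  std::tie(src, linearIndex) = makeLinearIndex(self, indices);
  return src.take(linearIndex);  // reads from src as if it were 1-D
}
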
Example No. 5
Tensor& add_out(Tensor& result, const Tensor& self, const Tensor& other, Scalar alpha) {
  if (!self.is_cuda()) {
    // See Note [CPU sparse is globally native] and Note [Multiple dispatch to sparse]
    auto self_sparse = self.is_sparse();
    auto other_sparse = other.is_sparse();
    if (self_sparse && other_sparse) {
      Tensor b_self, b_other;
      std::tie(b_self, b_other) = expand_outplace(self, other, "add_out");
      return s_native_add_out(result, b_self, b_other, alpha);
    } else if (!self_sparse && other_sparse) {
      // TODO: Perhaps doing overload selection with SparseTensorRef is
      // confusing, and we should have given these overloads different names.
      // For now, we do it this way for consistency with the TH bindings
      // (not that it is terribly consistent anyway).
      return native_add_out(result, self, SparseTensorRef(other), alpha);
    } else {
      return th_add_out(result, self, other, alpha);
    }
  } else {
    return th_add_out(result, self, other, alpha);
  }
}
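
The _out variant writes into a caller-provided tensor, which lets callers reuse a preallocated buffer across calls. A minimal sketch, assuming an ATen build with the out-first at::add_out overload shown above:

#include <ATen/ATen.h>

int main() {
  at::Tensor result = at::empty({2, 3});  // preallocated output buffer
  at::Tensor a = at::ones({2, 3});
  at::Tensor b = at::ones({2, 3});
  // computes a + 2 * b into result without allocating a new output tensor
  at::add_out(result, a, b, /*alpha=*/2);
  return 0;
}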