Code example #1
File: batch_mm.cpp  Project: gtgalone/pytorch
// Copy a two-element at::IntList (a non-owning ArrayRef<int64_t> of tensor
// sizes) into a fixed-size std::array, asserting the input is 2-D.
static std::array<int64_t, 2> as_array(at::IntList sizes) {
  JIT_ASSERT(sizes.size() == 2);
  std::array<int64_t, 2> arr;
  arr[0] = sizes[0];
  arr[1] = sizes[1];
  return arr;
}
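A minimal usage sketch (hypothetical, not part of batch_mm.cpp), assuming as_array is in scope and the program links against ATen: it copies the non-owning size view of a 2-D tensor into a std::array that can be stored by value.

#include <ATen/ATen.h>
#include <array>
#include <cstdint>
#include <iostream>

int main() {
  at::Tensor mat = at::randn({3, 4});
  // mat.sizes() returns a non-owning at::IntList view; as_array (above)
  // copies it into a value type that outlives the view.
  std::array<int64_t, 2> dims = as_array(mat.sizes());
  std::cout << dims[0] << " x " << dims[1] << "\n"; // prints "3 x 4"
  return 0;
}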
Code example #2
File: peephole.cpp  Project: Jsmilemsj/pytorch
// First iterate over the 'from' tensor sizes. Ignore all leading and trailing
// dimensions that are simply one, since they can be trivially broadcast.
// Then, iterating over the remaining sizes of the reduced 'from' tensor,
// starting at the trailing dimension, each size must either equal the
// corresponding 'to' size, or the 'from' dimensions must be exhausted.
//
// Note that this is NOT equivalent to numpy broadcasting semantics, and does
// not represent the generalized broadcasting that PyTorch implements in
// general. Rather, this is Caffe2-style broadcasting.
bool fusibleExpandTo(at::IntList from, at::IntList to) {
  if (from.size() > to.size()) {
    return false;
  }
  ssize_t from_dim_start = 0, from_dim_end = from.size() - 1;
  // Skip leading dimensions of size one.
  while (from_dim_start < (ssize_t) from.size() && from[from_dim_start] == 1) {
    from_dim_start++;
  }
  // Skip trailing dimensions of size one.
  while (from_dim_end > from_dim_start && from[from_dim_end] == 1) {
    from_dim_end--;
  }

  // Compare the remaining 'from' dimensions against the trailing 'to'
  // dimensions; every surviving pair must match exactly.
  ssize_t f = from_dim_end;
  ssize_t t = to.size() - 1;
  for (; f >= from_dim_start && t >= 0; --f, --t) {
    if (from[f] != to[t]) return false;
  }

  // In the case that the 'to' tensor has leading ones in the same place that
  // the 'from' tensor does, f will be less than from_dim_start rather than
  // strictly equal. E.g.: to := [5, 1, 768] and from := [1, 1, 768]
  return f <= from_dim_start;
}
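To make the contrast with numpy/PyTorch broadcasting concrete, here is a small hypothetical test harness (not part of peephole.cpp), assuming fusibleExpandTo is in scope and at::IntList is available from the ATen headers:

#include <ATen/ATen.h>
#include <cassert>

int main() {
  // The case documented above: the leading ones of 'from' line up with
  // the leading 'to' dimensions, so f ends up below from_dim_start.
  assert(fusibleExpandTo({1, 1, 768}, {5, 1, 768}));

  // The reduced 'from' sizes match the trailing 'to' sizes exactly.
  assert(fusibleExpandTo({768}, {5, 1, 768}));

  // numpy/PyTorch broadcasting would stretch [3, 1] to [3, 4], but here
  // the trailing one is stripped, leaving [3] to compare against the
  // trailing 'to' size 4, which fails.
  assert(!fusibleExpandTo({3, 1}, {3, 4}));
  return 0;
}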