Example #1
// returns a vector `ret` such that transposing by `ret` is equivalent
// to transposing by `t1` and then by `t2`
std::vector<int64_t> composeTransposes(const std::vector<int64_t> & t1,
                                       const std::vector<int64_t> & t2) {
  JIT_ASSERT(t1.size() == t2.size());
  std::vector<int64_t> ret;
  for (size_t i = 0; i < t1.size(); i++) {
    JIT_ASSERT(   t1[i]  < int64_t(t2.size()));
    JIT_ASSERT(t2[t1[i]] < int64_t(t2.size()));
    ret.push_back(t2[t1[i]]);
  }
  return ret;
}
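A quick sanity check is to compose a permutation with its inverse, which must give the identity under either composition convention. A minimal standalone sketch, assuming the function above is in scope and JIT_ASSERT is defined (e.g. as assert):

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  std::vector<int64_t> t1 = {1, 2, 0};
  std::vector<int64_t> t2 = {2, 0, 1};  // inverse permutation of t1
  std::vector<int64_t> id = composeTransposes(t1, t2);
  for (size_t i = 0; i < id.size(); i++) {
    assert(id[i] == int64_t(i));  // composing with the inverse yields identity
  }
  return 0;
}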
Example #2
    void check_value(const Value* v) {
      scope->insert(v);
      auto b2 = seen_uniques.insert(v->unique());
      JIT_ASSERT(b2.second);  // insertion took place
      JIT_ASSERT(v->unique() < g.next_unique_);

      for (auto use : v->uses()) {
        JIT_ASSERT(!scope->contains(use.user));
        JIT_ASSERT(g.all_nodes.count(use.user) == 1);
        anticipated_uses[use.user]++;  // int default constructs to 0
      }
    }
Example #3
    void check_graph() {
      node_set all_nodes_set(ALL_OF(g.all_nodes)); // NB: all_nodes is *unordered*

      check_block(g.block_);
      for (auto kv : anticipated_uses) {
        JIT_ASSERT(kv.second == -1);
      }
      // graph->stage() should be equal to max(node.stage for node in graph->nodes())
      if (g.nodes().begin() == g.nodes().end()) {
        JIT_ASSERT(g.stage() == 0);
      } else {
        JIT_ASSERT(g.stage() == g.nodes().rbegin()->stage());
      }
      JIT_ASSERT(std::includes(ALL_OF(sum_set), ALL_OF(all_nodes_set)));
    }
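These checks pass iterator ranges to the standard algorithms through an ALL_OF helper; a plausible definition (an assumption, the macro itself is not shown in these excerpts) is:

// Hypothetical definition: expand a container into the begin/end pair
// expected by <algorithm> functions such as std::includes.
#define ALL_OF(container) (container).begin(), (container).end()

Note that std::includes requires both ranges to be sorted, which is why the unordered all_nodes is first copied into an ordered node_set here, and why Example #5 carries a TODO about a type-safe wrapper.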
Example #4
static std::array<int64_t, 2> as_array(at::IntList sizes) {
  JIT_ASSERT(sizes.size() == 2);
  std::array<int64_t, 2> arr;
  arr[0] = sizes[0];
  arr[1] = sizes[1];
  return arr;
}
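Since at::Tensor::sizes() returns an at::IntList (a non-owning view), this helper copies a 2-D shape into storage whose lifetime is independent of the tensor, and the JIT_ASSERT fires for anything that is not exactly 2-D. A hedged usage sketch (the tensor itself is assumed):

// Hypothetical caller: capture a matrix shape by value.
at::Tensor weight = /* some 2-D tensor, e.g. a weight matrix */;
std::array<int64_t, 2> wh = as_array(weight.sizes());  // {rows, cols}
// wh stays valid even if weight is later resized or freed.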
Example #5
    void check_block(const Block *b) {
      for (auto input : b->inputs()) {
        check_value(input);
        JIT_ASSERT(input->node()->kind_ == kParam);
      }

      for (auto n : b->nodes()) {
        JIT_ASSERT(n->kind_ != kParam);
        JIT_ASSERT(n->kind_ != kReturn);
        check_node(n);
      }

      JIT_ASSERT(b->output_->kind() == kReturn);
      check_node(b->output_);

      // all_nodes
      // - inputs_, output_ and nodes_ are all included in all_nodes
      // - all_nodes does not contain dead nodes??? (likely to be temporarily
      // suspended).  Weaker: all_nodes contains all inputs and returns
      // - only one return node???

      node_set nodes_set(ALL_OF(b->nodes()));
      node_set inputs_set {b->input_};
      node_set output_set {b->output_};
      // TODO: Make a more type safe std::includes wrapper which disallows use on
      // non-ordered containers
      JIT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(nodes_set)));
      JIT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(inputs_set)));
      JIT_ASSERT(std::includes(ALL_OF(all_nodes_set), ALL_OF(output_set)));

      sum_set.insert(ALL_OF(nodes_set));
      sum_set.insert(ALL_OF(inputs_set));
      sum_set.insert(ALL_OF(output_set));
    }
Example #6
// TODO: I'm not entirely sure why this can't be in the header...
bool micropb_callback_string_from_tensor(pb_ostream_t *stream, const pb_field_t *field, void * const *arg) {
  at::Tensor* t = static_cast<at::Tensor*>(*arg);
  JIT_ASSERT(t->is_contiguous());
  // Packed array format!
  // A nanopb encode callback must report failure; propagate the return
  // values instead of discarding them.
  if (!pb_encode_tag_for_field(stream, field))
    return false;
  return pb_encode_string(stream, (pb_byte_t*)(t->data_ptr()),
                          t->type().elementSizeInBytes() * t->numel());
}
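This has the signature of a nanopb encode callback for a callback-typed field; a sketch of the wiring (the message and field names are hypothetical, while the pb_callback_t funcs/arg members are standard nanopb):

// Hypothetical wiring: point a callback-typed field at the tensor.
at::Tensor t = /* tensor to serialize; must be contiguous */;
SomeProto proto = SomeProto_init_zero;  // generated type, name assumed
proto.raw_data.funcs.encode = micropb_callback_string_from_tensor;
proto.raw_data.arg = &t;
// pb_encode(&stream, SomeProto_fields, &proto) will then invoke the
// callback to emit the tensor bytes as one length-delimited field.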
Example #7
 void check_node(const Node* n) {
   for (auto input : n->inputs_) {
     if (!scope->contains(input)) {
       JIT_ASSERTM(0, "%%%d not in scope", input->unique());
     }
   }
   JIT_ASSERT(anticipated_uses[n] == static_cast<int64_t>(n->inputs_.size()));
   anticipated_uses[n] = -1;  // we saw the anticipated user!
   scope->insert(n);
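   // Each nested block is checked in a fresh scope chained to the
   // current one; after check_block returns, the parent scope is
   // restored.  (scope = std::move(scope->parent) is safe: unique_ptr
   // move-assignment releases the source pointer before destroying the
   // old scope object.)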
   for(auto block : n->blocks()) {
     std::unique_ptr<LintScope> new_scope(new LintScope(std::move(scope)));
     scope = std::move(new_scope);
     check_block(block);
     scope = std::move(scope->parent);
   }
   size_t i = 0;
   for(auto o : n->outputs()) {
     JIT_ASSERT(o->node() == n);
     JIT_ASSERT(i++ == o->offset_);
     check_value(o);
   }
   n->lint();
 }
Example #8
std::vector<Value*> Method::emit_call_to(SourceRange loc, Method & callee, ArrayRef<Value*> inputs) {
  ensureTensors(loc, inputs);
  JIT_ASSERT(!executor);
  try {
    callee.ensure_defined();
  } catch (RecursiveMethodCallError&) {
    throw ErrorReport(loc) << " method '" << callee.name()
        << "' is called recursively involving this call site. Recursive calls are not supported";
  }
  auto fn = callee.graph();
  ensureSizeMatches(loc, callee.num_inputs(), inputs.size(), "inputs");
  std::vector<Value*> all_inputs = inputs;
  // parameters to callee method (which become parameters to _this_ method
  // if they were not already)
  for(at::Tensor* member : callee.member_inputs) {
    all_inputs.push_back(get_or_add_parameter(member));
  }
  return inlineCallTo(*graph(), *callee.graph(), all_inputs);
}
Example #9
static void checkSameDevice(const Node* node) {
  bool has_device = false;
  int device;
  auto checkValue = [&](const Value* v) {
    if(TensorType* type = v->type()->cast<TensorType>()) {
      if(!has_device) {
        has_device = true;
        device = type->device();
      } else {
        JIT_ASSERT(device == type->device());
      }
    }
  };
  for(auto input : node->inputs()) {
    checkValue(input);
  }
  for(auto output : node->outputs()) {
    checkValue(output);
  }
}
Example #10
void fuseBroadcast(std::shared_ptr<Graph>& graph) {
  for(auto n : graph->nodes()) {

    // Can't fuse into nodes that don't support broadcasting
    if (!isBroadcasting(n)) continue;

    // If the node already broadcasts, can't "rebroadcast"
    // TODO: Actually, maybe you can, if there is a broadcast for some
    // dims, and then another broadcast for the rest.  But this will
    // never happen in practice so I didn't implement it.
    if (n->hasAttribute(kbroadcast) && n->i(kbroadcast)) continue;
    JIT_ASSERT(!n->hasAttribute(kaxis));

    auto input_index = n->inputs().size() - 1;
    auto* expanded_rhs = n->input(input_index)->node();

    // The expanded_rhs input isn't actually an expand, so no fusion available
    if (expanded_rhs->kind() != kExpand) continue;

    auto* unexpanded_rhs = expanded_rhs->input();

    // We need to know what the type pre-expand is.  We should basically
    // always have this information (because expands are only ever traced,
    // not generated from symbolic), but if for some reason we don't
    // have it, we need to skip.
    if (!unexpanded_rhs->isTensor()) continue;

    // Not all broadcasts are supported by ONNX broadcast.
    if (!fusibleExpandTo(unexpanded_rhs->type()->expect<TensorType>()->sizes(), // from
                         expanded_rhs->output()->type()->expect<TensorType>()->sizes())   // to
       ) continue;

    n->replaceInput(input_index, unexpanded_rhs);
    n->i_(kbroadcast, 1);
    if (!expanded_rhs->hasUses()) {
      expanded_rhs->destroy();
    }
  }
}
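In IR terms, the rewrite performed on each hit looks roughly like this (a sketch with made-up value numbers and shapes):

Before (the right-hand side is explicitly expanded to match shapes):
  %3 = Expand(%2)                 // e.g. broadcast [4] up to [3, 4]
  %4 = Add(%1, %3)
After (the Expand is folded into the ONNX broadcast attribute, and the
Expand node is destroyed once it has no remaining uses):
  %4 = Add[broadcast=1](%1, %2)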
Example #11
 void insert(const Node * n) {
   JIT_ASSERT(!contains(n));
   nodes.insert(n);
 }
Example #12
 void insert(const Value * v) {
   JIT_ASSERT(!contains(v));
   values.insert(v);
 }
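Examples #11 and #12 are the two insert overloads of the LintScope used throughout the linter; a plausible reconstruction of the surrounding class (an assumption inferred from how check_node pushes and pops scopes above) is:

#include <memory>
#include <unordered_set>

// Hypothetical reconstruction: contains() walks the parent chain, so a
// value is in scope if any enclosing block declared it, while insert()
// (the two overloads above) always targets the innermost scope.
struct LintScope {
  LintScope() = default;
  explicit LintScope(std::unique_ptr<LintScope> parent)
      : parent(std::move(parent)) {}
  bool contains(const Value* v) const {
    return values.count(v) > 0 || (parent && parent->contains(v));
  }
  bool contains(const Node* n) const {
    return nodes.count(n) > 0 || (parent && parent->contains(n));
  }
  // ... insert(const Node*) / insert(const Value*) as shown above ...
  std::unique_ptr<LintScope> parent;  // public: check_node moves it back out
  std::unordered_set<const Node*> nodes;
  std::unordered_set<const Value*> values;
};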
Example #13
// NB: This assert is written to assume you don't have any unattached
// nodes.  Unattached nodes can occur while manipulations to the
// graph are occurring.
void Node::lint() const {
  // Node invariants
  // - if node should live in list, nodes_iter is consistent
  // - Inputs are all marked as a use by the nodes they refer to
  // - Stage is consistent (stage is >= all input stages)
  // - Owning graph is non-null and consistent
  // - The "Select" invariant, when the node is MultiReturn
  //
  // The handle invariant:
  //    If a node takes a handle as an input, it is always the
  //    LAST input of the node.  There is at most one handle input.

  {
    size_t i = 0;
    for (auto input : inputs_) {
      // WARNING: O(n^2)
      JIT_ASSERT(std::find(ALL_OF(input->uses_), Use(const_cast<Node*>(this), i)) != input->uses_.end());
      JIT_ASSERT(stage_ >= input->stage_);
      JIT_ASSERT(graph_->all_nodes.count(this) == 1);
      // Handle invariant
      if (i != inputs_.size() - 1) {
        JIT_ASSERT(input->type()->kind() != TypeKind::HandleType);
      }
      i++;
    }
  }

  for(auto o : outputs()) {
    size_t i = 0;
    for (auto use : o->uses()) {
      // Use invariants
      // - Use is consistent with inputs
      // - Every user node is live (checked in Graph)
      JIT_ASSERT(use.user->inputs_[use.offset] == o);
      i++;
    }
  }

  // Node subclass invariants
  // - Return uses is zero
  // - Param inputs is zero
  // - Select inputs is one
  // - Python operator cconv is correct

  IR_IF(this,Constant)
    JIT_ASSERT(inputs_.size() == 0);
  IR_ELSEIF(Return)
    JIT_ASSERT(outputs().size() == 0);
  IR_ELSEIF(Param)
    JIT_ASSERT(inputs_.size() == 0);
  IR_ELSEIFM_CONST(PythonOp)
    std::size_t n_scalars = 0, n_tensors = 0;
    for (auto c : value->cconv) {
      if (c == 's') {
        n_scalars++;
      } else if (c == 't') {
        n_tensors++;
      } else {
        JIT_ASSERT(0);
      }
      JIT_ASSERT(static_cast<bool>(value->pyobj));
    }
    JIT_ASSERT(n_scalars == value->scalar_args.size());
    JIT_ASSERT(n_tensors == inputs_.size());
  IR_ELSEIFM_CONST(CppOp)
    // TODO: add invariants
  IR_ELSEIF(Eval)
    // TODO: add invariants
  // TODO: It's not good for these ops to be top-level, it makes cases longer.
  IR_ELSEIF(FusionGroup)
    checkSameDevice(value);
    // TODO: Typecheck the parameters
    value->g(kSubgraph)->lint();
  IR_END()

}
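The IR_IF / IR_ELSEIF / IR_END chain above is a kind-dispatch macro family; a rough sketch of the expansion (an assumption, the macros themselves are not in these excerpts):

// Hypothetical expansion: each arm tests the node's kind and binds the
// matched (and, for the M-variants, downcast) node to a local named
// `value`, which is why the PythonOp arm can read value->cconv and the
// FusionGroup arm can call value->g(kSubgraph).
//   IR_IF(this, Constant)       ~> if (kind() == kConstant) { auto value = this;
//   IR_ELSEIFM_CONST(PythonOp)  ~> } else if (kind() == kPythonOp) { auto value = static_cast<const PythonOp*>(this);
//   IR_END()                    ~> }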