Example #1
void renumber_registers(IRCode* code, bool width_aware) {
  auto chains = calculate_ud_chains(code);

  // Union-find over defs: defs that can reach a common use are unified below
  // and must end up sharing a register.
  Rank rank;
  Parent parent;
  // (The extra parentheses around the property-map arguments sidestep C++'s
  // most vexing parse.)
  DefSets def_sets((RankPMap(rank)), (ParentPMap(parent)));
  for (const auto& mie : InstructionIterable(code)) {
    if (mie.insn->dests_size()) {
      def_sets.make_set(mie.insn);
    }
  }
  unify_defs(chains, &def_sets);
  SymRegMapper sym_reg_mapper(width_aware);
  // Hand each def-set a fresh symbolic register and rewrite the dests.
  for (auto& mie : InstructionIterable(code)) {
    auto insn = mie.insn;
    if (insn->dests_size()) {
      auto sym_reg = sym_reg_mapper.make(def_sets.find_set(insn));
      insn->set_dest(sym_reg);
    }
  }
  // Rewrite every src to the register of its reaching defs; after unification,
  // all defs reaching a use are in one set, so *defs.begin() suffices.
  for (auto& mie : InstructionIterable(code)) {
    auto insn = mie.insn;
    for (size_t i = 0; i < insn->srcs_size(); ++i) {
      auto& defs = chains.at(Use{insn, insn->src(i)});
      insn->set_src(i, sym_reg_mapper.at(def_sets.find_set(*defs.begin())));
    }
  }
  code->set_registers_size(sym_reg_mapper.regs_size());
}
Example #2
static size_t num_opcodes_bb(cfg::Block* block) {
  size_t result = 0;
  // Iterate by const reference to avoid copying each MethodItemEntry; the
  // entry itself is unused, we only count iterations.
  for (const auto& mie : InstructionIterable(block)) {
    (void)mie;
    ++result;
  }
  return result;
}
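A minimal alternative sketch, not from the source: the iterable exposes begin()/end() (Example #17 below uses them directly), so the same count could be taken with std::distance, assuming InstructionIterable's iterator meets the standard input-iterator requirements.

#include <iterator>

static size_t num_opcodes_bb_alt(cfg::Block* block) {
  auto ii = InstructionIterable(block);
  // Walks the range once, exactly like the counting loop above.
  return static_cast<size_t>(std::distance(ii.begin(), ii.end()));
}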
Example #3
/*
 * This exists because in the absence of a register allocator, we need each
 * transformation to keep the ins registers at the end of the frame. Once the
 * register allocator is switched on this function should no longer have many
 * use cases.
 */
size_t sum_param_sizes(const IRCode* code) {
  size_t size {0};
  auto param_ops = code->get_param_instructions();
  for (auto& mie : InstructionIterable(&param_ops)) {
    size += mie.insn->dest_is_wide() ? 2 : 1;
  }
  return size;
}
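As a worked example: for a static method taking a long, an int, and an Object, the wide long load-param contributes 2 and the other two contribute 1 each, so sum_param_sizes returns 4; a non-static method would count one more register for `this`.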
Example #4
int count_ops(cfg::ControlFlowGraph& cfg, IROpcode op) {
  int result = 0;
  for (const auto& mie : InstructionIterable(cfg)) {
    if (mie.insn->opcode() == op) {
      result++;
    }
  }
  return result;
}
Example #5
int count_ifs(cfg::ControlFlowGraph& cfg) {
  int num_ifs = 0;
  for (const auto& mie : InstructionIterable(cfg)) {
    if (is_conditional_branch(mie.insn->opcode())) {
      num_ifs++;
    }
  }
  return num_ifs;
}
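Examples #4, #5, #7, and #8 all share the same filter-and-count shape. A hypothetical generalization over an arbitrary predicate (count_insns_if is not part of the source) might look like:

template <typename Pred>
size_t count_insns_if(cfg::ControlFlowGraph& cfg, Pred pred) {
  size_t n = 0;
  for (const auto& mie : InstructionIterable(cfg)) {
    if (pred(mie.insn)) {
      ++n;
    }
  }
  return n;
}

// Usage, e.g. to reproduce count_ifs:
// count_insns_if(cfg, [](IRInstruction* insn) {
//   return is_conditional_branch(insn->opcode());
// });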
Example #6
TEST(PropagationTest1, localDCE1) {
  g_redex = new RedexContext();

  const char* dexfile = std::getenv("dexfile");
  ASSERT_NE(nullptr, dexfile);

  std::vector<DexStore> stores;
  DexMetadata dm;
  dm.set_id("classes");
  DexStore root_store(dm);
  root_store.add_classes(load_classes_from_dex(dexfile));
  DexClasses& classes = root_store.get_dexen().back();
  stores.emplace_back(std::move(root_store));
  std::cout << "Loaded classes: " << classes.size() << std::endl;

  TRACE(DCE, 2, "Code before:\n");
  for (const auto& cls : classes) {
    TRACE(DCE, 2, "Class %s\n", SHOW(cls));
    // Named `dmethod` rather than `dm`, which would shadow the DexMetadata
    // declared above.
    for (const auto& dmethod : cls->get_dmethods()) {
      TRACE(DCE, 2, "dmethod: %s\n", dmethod->get_name()->c_str());
      if (strcmp(dmethod->get_name()->c_str(), "propagate") == 0) {
        TRACE(DCE, 2, "dmethod: %s\n", SHOW(dmethod->get_code()));
      }
    }
  }

  std::vector<Pass*> passes = {
    new PeepholePass(),
    new LocalDcePass(),
  };

  PassManager manager(passes);
  manager.set_testing_mode();

  Json::Value conf_obj = Json::nullValue;
  ConfigFiles dummy_cfg(conf_obj);
  manager.run_passes(stores, dummy_cfg);

  TRACE(DCE, 2, "Code after:\n");
  for (const auto& cls : classes) {
    TRACE(DCE, 2, "Class %s\n", SHOW(cls));
    for (const auto& dmethod : cls->get_dmethods()) {
      TRACE(DCE, 2, "dmethod: %s\n", dmethod->get_name()->c_str());
      if (strcmp(dmethod->get_name()->c_str(), "propagate") == 0) {
        TRACE(DCE, 2, "dmethod: %s\n", SHOW(dmethod->get_code()));
        for (auto& mie : InstructionIterable(dmethod->get_code())) {
          auto instruction = mie.insn;
          // Make sure there is no invoke-virtual in the optimized method.
          ASSERT_NE(instruction->opcode(), OPCODE_INVOKE_VIRTUAL);
          // Make sure there is no const-class in the optimized method.
          ASSERT_NE(instruction->opcode(), OPCODE_CONST_CLASS);
        }
      }
    }
  }
}
Example #7
int count_sgets(cfg::ControlFlowGraph& cfg) {
  int sgets = 0;
  for (auto& mie : InstructionIterable(cfg)) {
    TRACE(RME, 1, "%s\n", SHOW(mie.insn));
    if (is_sget(mie.insn->opcode())) {
      sgets++;
    }
  }
  return sgets;
}
Example #8
int count_igets(cfg::ControlFlowGraph& cfg, const std::string& field_name) {
  int num_igets = 0;
  for (const auto& mie : InstructionIterable(cfg)) {
    if (is_iget(mie.insn->opcode()) &&
        mie.insn->get_field()->get_name()->str() == field_name) {
      num_igets++;
    }
  }
  return num_igets;
}
Example #9
boost::optional<ParamIndex> find_return_param_index(
    cfg::ControlFlowGraph& cfg) {
  for (auto& mie : InstructionIterable(cfg)) {
    TRACE(RP, 2, "  %s\n", SHOW(mie.insn));
  }
  // find register that is being returned (if any)
  cfg.calculate_exit_block();
  auto exit_block = cfg.exit_block();
  auto it = exit_block->rbegin();
  if (it == exit_block->rend() || !is_return_value(it->insn->opcode()))
    return boost::none;
  auto return_reg = it->insn->src(0);
  TRACE(RP, 2, "  returns v%d\n", return_reg);
  ++it;
  if (it == exit_block->rend() || !is_move(it->insn->opcode()))
    return boost::none;
  auto src_reg = it->insn->src(0);
  TRACE(RP, 2, "  move v%d, v%d\n", it->insn->dest(), src_reg);
  if (it->insn->dest() != return_reg) return boost::none;
  // let's see if it came from a unique load-param
  IRInstruction* load_param = nullptr;
  for (auto& mie : InstructionIterable(cfg)) {
    if (mie.insn->dests_size()) {
      if (mie.insn->dest() == src_reg) {
        if (opcode::is_load_param(mie.insn->opcode())) {
          load_param = mie.insn;
        } else {
          TRACE(RP, 2, "  move_reg clobbered\n");
          return boost::none;
        }
      }
    }
  }
  if (load_param != nullptr) {
    ParamIndex param_index = get_load_param_map(cfg).at(load_param);
    TRACE(RP, 2, "  found matching load-param %d\n", param_index);
    return param_index;
  } else {
    TRACE(RP, 2, "  did not find matching load-param\n");
    return boost::none;
  }
}
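In short, the shape being matched is a `return vR` preceded by a `move vR, vP`, where vP (illustrative register names) is written exactly once in the whole CFG, by a load-param; any other writer of vP clobbers the pattern and the function bails out with boost::none.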
Example #10
bool no_invoke_super(const DexMethod* method) {
  auto code = method->get_code();
  always_assert(code);

  for (const auto& mie : InstructionIterable(code)) {
    auto insn = mie.insn;
    if (insn->opcode() == OPCODE_INVOKE_SUPER) {
      return false;
    }
  }

  return true;
}
Example #11
/*
 * Record the environment before the execution of every instruction. We need
 * this data during the backwards used vars analysis.
 */
static std::unordered_map<const IRInstruction*, ptrs::Environment>
gen_instruction_environment_map(const cfg::ControlFlowGraph& cfg,
                                const ptrs::FixpointIterator& fp_iter) {
  std::unordered_map<const IRInstruction*, ptrs::Environment> result;
  for (auto* block : cfg.blocks()) {
    auto env = fp_iter.get_entry_state_at(block);
    for (auto& mie : InstructionIterable(block)) {
      auto* insn = mie.insn;
      result.emplace(insn, env);
      fp_iter.analyze_instruction(insn, &env);
    }
  }
  return result;
}
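A hypothetical consumer sketch (`cfg` and `fp_iter` are assumed to be in scope, with the types from the function's own signature): during the backwards used-vars pass, the map yields the pointer environment that held just before each instruction executed.

auto env_map = gen_instruction_environment_map(cfg, fp_iter);
for (auto* block : cfg.blocks()) {
  for (auto& mie : InstructionIterable(block)) {
    const ptrs::Environment& env = env_map.at(mie.insn);
    // ... feed `env` into the backwards used-vars transfer function ...
  }
}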
Example #12
void MethodCreator::load_locals(DexMethod* meth) {
  auto ii = InstructionIterable(
      meth->get_code()->get_param_instructions());
  auto it = ii.begin();
  if (!is_static(meth)) {
    make_local_at(meth->get_class(), it->insn->dest());
    ++it;
  }
  auto proto = meth->get_proto();
  auto args = proto->get_args();
  if (args) {
    for (auto arg : args->get_type_list()) {
      make_local_at(arg, it->insn->dest());
      ++it;
    }
  }
  always_assert(it == ii.end());
}
Example #13
bool passes_args_through(IRInstruction* insn,
                         const IRCode& code,
                         int ignore /* = 0 */
) {
  size_t src_idx{0};
  size_t param_count{0};
  for (const auto& mie :
       InstructionIterable(code.get_param_instructions())) {
    auto load_param = mie.insn;
    ++param_count;
    if (src_idx >= insn->srcs_size()) {
      continue;
    }
    if (load_param->dest() != insn->src(src_idx++)) {
      return false;
    }
  }
  return insn->srcs_size() + ignore == param_count;
}
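Note the contract: the function returns true only when the instruction's sources line up, in order, with the leading load-param registers and the parameter count exceeds the source count by exactly `ignore`; for instance, `ignore = 1` accepts an invoke that forwards every parameter except the last one.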
Example #14
std::vector<DexType*> RemoveBuildersPass::created_builders(DexMethod* m) {
  always_assert(m != nullptr);

  std::vector<DexType*> builders;
  auto code = m->get_code();
  if (!code) {
    return builders;
  }
  for (auto& mie : InstructionIterable(code)) {
    auto insn = mie.insn;
    if (insn->opcode() == OPCODE_NEW_INSTANCE) {
      DexType* cls = insn->get_type();
      if (m_builders.find(cls) != m_builders.end()) {
        builders.emplace_back(cls);
      }
    }
  }
  return builders;
}
Example #15
// Check that visibility / accessibility changes to the current method
// won't need to change a referenced method into a virtual or static one.
bool gather_invoked_methods_that_prevent_relocation(
    const DexMethod* method,
    std::unordered_set<DexMethodRef*>* methods_preventing_relocation) {
  auto code = method->get_code();
  always_assert(code);

  bool can_relocate = true;
  for (const auto& mie : InstructionIterable(code)) {
    auto insn = mie.insn;
    auto opcode = insn->opcode();
    if (is_invoke(opcode)) {
      auto meth = resolve_method(insn->get_method(), opcode_to_search(insn));
      if (!meth && opcode == OPCODE_INVOKE_VIRTUAL &&
          unknown_virtuals::is_method_known_to_be_public(insn->get_method())) {
        continue;
      }
      if (meth) {
        always_assert(meth->is_def());
        if (meth->is_external() && !is_public(meth)) {
          meth = nullptr;
        } else if (opcode == OPCODE_INVOKE_DIRECT && !is_init(meth)) {
          meth = nullptr;
        }
      }
      if (!meth) {
        can_relocate = false;
        if (!methods_preventing_relocation) {
          break;
        }
        methods_preventing_relocation->emplace(insn->get_method());
      }
    }
  }

  return can_relocate;
}
Example #16
void exclude_referenced_bridgee(DexMethod* code_method, IRCode& code) {
  for (auto& mie : InstructionIterable(&code)) {
    auto inst = mie.insn;
    if (!is_invoke(inst->opcode())) continue;
    auto method = inst->get_method();
    auto range = m_potential_bridgee_refs.equal_range(
        MethodRef(method->get_class(), method->get_name(),
            method->get_proto()));
    for (auto it = range.first; it != range.second; ++it) {
      auto referenced_bridge = it->second;
      // Don't count the bridge itself
      if (referenced_bridge == code_method) continue;
      TRACE(BRIDGE,
            5,
            "Rejecting, reference `%s.%s.%s' in `%s' blocks `%s'\n",
            SHOW(method->get_class()),
            SHOW(method->get_name()),
            SHOW(method->get_proto()),
            SHOW(code_method),
            SHOW(referenced_bridge));
      m_bridges_to_bridgees.erase(referenced_bridge);
    }
  }
}
Example #17
/*
 * Build the interference graph by adding edges between nodes that are
 * simultaneously live.
 *
 * check-cast instructions have to be handled specially. They are represented
 * with both a dest (via a move-result-pseudo) and a src in our IR. However, in
 * actual Dex bytecode, it only takes a single operand which acts as both src
 * and dest. So when converting IR to Dex bytecode, we need to insert a move
 * instruction if the src and dest operands differ. We must insert the move
 * before, not after, the check-cast. Suppose we did not:
 *
 *        IR                  |           Dex
 *   sget-object v0 LFoo;     |  sget-object v0 LFoo;
 *   check-cast v0 LBar;      |  check-cast v0 LBar;
 *   move-result-pseudo v1    |  move-object v1 v0
 *   invoke-static v0 LFoo.a; |  invoke-static v0 LFoo.a; // v0 is of type Bar!
 *
 * However, inserting before the check-cast is tricky to get right. If the
 * check-cast is in a try region, we must be careful to not clobber other
 * live registers. For example, if we had some IRCode like
 *
 *   B0:
 *     load-param v1 Ljava/lang/Object;
 *     TRY_START
 *     const v0 123
 *     check-cast v1 LFoo;
 *   B1:
 *     move-result-pseudo v0
 *     return v0
 *     TRY_END
 *   B2:
 *     CATCH
 *     // handle failure of check-cast
 *     // Note that v0 has the value of 123 here because the check-cast failed
 *     add-int v0, v0, v0
 *
 * Inserting the move before the check-cast would cause v0 to have an object
 * (instead of integer) type inside the exception handler.
 *
 * The solution is to have the interference graph make check-cast's dest
 * register interfere with the live registers in both B0 and B1, so that when
 * the move gets inserted, it does not clobber any live registers.
 */
Graph GraphBuilder::build(const LivenessFixpointIterator& fixpoint_iter,
                          IRCode* code,
                          reg_t initial_regs,
                          const RangeSet& range_set) {
  Graph graph;
  auto ii = InstructionIterable(code);
  for (auto it = ii.begin(); it != ii.end(); ++it) {
    GraphBuilder::update_node_constraints(it.unwrap(), range_set, &graph);
  }

  auto& cfg = code->cfg();
  for (cfg::Block* block : cfg.blocks()) {
    LivenessDomain live_out = fixpoint_iter.get_live_out_vars_at(block);
    for (auto it = block->rbegin(); it != block->rend(); ++it) {
      if (it->type != MFLOW_OPCODE) {
        continue;
      }
      auto insn = it->insn;
      auto op = insn->opcode();
      if (opcode::has_range_form(op)) {
        graph.m_range_liveness.emplace(insn, live_out);
      }
      if (insn->dests_size()) {
        for (auto reg : live_out.elements()) {
          if (is_move(op) && reg == insn->src(0)) {
            continue;
          }
          graph.add_edge(insn->dest(), reg);
        }
        // We add interference edges between the wide src and dest operands of
        // an instruction even if the srcs are not live-out. This avoids
        // allocations like `xor-long v1, v0, v9`, where v1 and v0 overlap --
        // even though this is not a verification error, we have observed bugs
        // in the ART interpreter when handling these sorts of instructions.
        // However, we still want to be able to coalesce these symregs if they
        // don't actually interfere based on liveness information, so that we
        // can remove move-wide opcodes and/or use /2addr encodings.  As such,
        // we insert a specially marked edge that coalescing ignores but
        // coloring respects.
        if (insn->dest_is_wide()) {
          for (size_t i = 0; i < insn->srcs_size(); ++i) {
            if (insn->src_is_wide(i)) {
              graph.add_coalesceable_edge(insn->dest(), insn->src(i));
            }
          }
        }
      }
      if (op == OPCODE_CHECK_CAST) {
        auto move_result_pseudo = std::prev(it)->insn;
        for (auto reg : live_out.elements()) {
          graph.add_edge(move_result_pseudo->dest(), reg);
        }
      }
      // Add containment edges between the live range defined by this insn and
      // the elements of its live-out set.
      if (insn->dests_size()) {
        for (auto reg : live_out.elements()) {
          graph.add_containment_edge(insn->dest(), reg);
        }
      }
      fixpoint_iter.analyze_instruction(it->insn, &live_out);
      // After analyze_instruction, live_out now holds the live-in set; add
      // containment edges between the live ranges used by this insn and the
      // elements of its live-in set.
      for (size_t i = 0; i < insn->srcs_size(); ++i) {
        for (auto reg : live_out.elements()) {
          graph.add_containment_edge(insn->src(i), reg);
        }
      }
    }
  }
  for (auto& pair : graph.nodes()) {
    auto reg = pair.first;
    auto& node = pair.second;
    if (reg >= initial_regs) {
      node.m_props.set(Node::SPILL);
    }
    assert_log(!node.m_type_domain.is_bottom(),
               "Type violation of v%u in code:\n%s\n",
               reg,
               SHOW(code));
  }
  return graph;
}
Example #18
/**
 * Fill `m_prologue_blocks` and return the register that we're "switching" on
 * (even if it's not a real switch statement)
 */
boost::optional<uint16_t> SwitchMethodPartitioning::compute_prologue_blocks(
    cfg::ControlFlowGraph* cfg,
    const cp::intraprocedural::FixpointIterator& fixpoint,
    bool verify_default_case) {

  for (const cfg::Block* b : cfg->blocks()) {
    always_assert_log(!b->is_catch(),
                      "SwitchMethodPartitioning does not support methods with "
                      "catch blocks. %d has a catch block in %s",
                      b->id(), SHOW(*cfg));
  }

  // First, add all the prologue blocks that form a linear chain before the
  // case-block selection blocks (a switch or an if-else tree) begin.
  for (cfg::Block* b = cfg->entry_block(); b != nullptr; b = b->follow_goto()) {
    m_prologue_blocks.push_back(b);
  }

  {
    auto last_prologue_block = m_prologue_blocks.back();
    auto last_prologue_insn_it = last_prologue_block->get_last_insn();
    always_assert(last_prologue_insn_it != last_prologue_block->end());
    auto last_prologue_insn = last_prologue_insn_it->insn;
    // If this method was compiled from a default-case-only switch, there will
    // be no branch opcode -- the method will always throw an
    // IllegalArgumentException.
    auto op = last_prologue_insn->opcode();
    always_assert(!verify_default_case || is_branch(op) || op == OPCODE_THROW);

    if (!is_branch(op)) {
      return boost::none;
    } else if (is_switch(op)) {
      // switch or if-else tree. Not both.
      return last_prologue_insn->src(0);
    }
  }

  // Handle a tree of if statements in the prologue. d8 emits this
  // when it would be smaller than a switch statement. The non-leaf nodes of the
  // tree are prologue blocks. The leaf nodes of the tree are case blocks.
  //
  // For example:
  //   load-param v0
  //   const v1 1
  //   if-eq v0 v1 CASE_1
  //   goto EXIT_BLOCK      ; or return
  //   const v1 2
  //   if-eq v0 v1 CASE_2
  //   goto EXIT_BLOCK      ; or return
  //   ...
  //
  // Traverse the tree starting at the end of the linear chain of prologue
  // blocks and stopping before we reach a leaf.
  boost::optional<uint16_t> determining_reg;
  std::queue<cfg::Block*> to_visit;
  to_visit.push(m_prologue_blocks.back());
  while (!to_visit.empty()) {
    auto b = to_visit.front();
    to_visit.pop();

    // Leaf nodes have 0 or 1 successors (return or goto the epilogue blocks).
    // Throw edges are disallowed.
    if (b->succs().size() >= 2) {
      // The linear check above and this tree check both account for the
      // top-most node in the tree. Make sure we don't duplicate it
      if (b != m_prologue_blocks.back()) {
        m_prologue_blocks.push_back(b);

        // Verify there aren't extra instructions in here that we may lose track
        // of
        for (const auto& mie : InstructionIterable(b)) {
          auto insn = mie.insn;
          auto op = insn->opcode();
          always_assert_log(is_const(op) || is_conditional_branch(op),
                            "Unexpected instruction in if-else tree %s",
                            SHOW(insn));
        }
      }
      for (auto succ : b->succs()) {
        to_visit.push(succ->target());
      }

      // Make sure all blocks agree on which register is the determiner
      uint16_t candidate_reg = ::find_determining_reg(b, fixpoint);
      if (determining_reg == boost::none) {
        determining_reg = candidate_reg;
      } else {
        always_assert_log(
            *determining_reg == candidate_reg,
            "Conflict: which register are we switching on? %d != %d in %s",
            *determining_reg, candidate_reg, SHOW(*cfg));
      }
    }
  }
  always_assert_log(determining_reg != boost::none,
                    "Couldn't find determining register in %s", SHOW(*cfg));
  return determining_reg;
}