// Build a `program` for every function definition visible in `un`.
//
// For each function that has a body, a CFG is constructed (via build_cfg)
// and registered under its mangled name.  The visitor is consulted before
// each function (function_started may veto processing) and notified after
// (function_completed).  Virtual-function metadata is copied over from the
// unit navigator up front.
program build_program(unit_navigator const & un, clang::SourceManager const & sm, Visitor visitor, std::string const & static_prefix)
{
    program result;
    result.vfn_map(un.vfn_map());
    result.vfn_param_counts(un.vfn_param_counts());

    for (unit_navigator::fns_const_iterator fn_it = un.fns_begin(); fn_it != un.fns_end(); ++fn_it)
    {
        clang::FunctionDecl const * decl = *fn_it;

        // Declarations without a body contribute no CFG.
        if (!decl->hasBody())
            continue;

        std::string const & decl_name = un.nm().make_decl_name(decl, static_prefix);

        // The visitor may skip this function entirely.
        if (!visitor.function_started(decl_name))
            continue;

        cfg fn_cfg;
        detail::build_cfg_visitor<Visitor> cfg_visitor(visitor);
        build_cfg(result, fn_cfg, un.nm(), decl, sm, result.fnames(), cfg_visitor, static_prefix);
        visitor.function_completed(decl_name, fn_cfg);
        result.add_cfg(decl_name, fn_cfg);
    }

    return result;
}
/* Verify that converting a trivial gimplified function to SSA form yields
   the expected three-block CFG whose return value is an SSA name.  */

static void test_conversion_to_ssa ()
{
  /* As above, construct a trivial function, gimplify it, and build a CFG: */
  tree fndecl = build_trivial_high_gimple_function ();
  function *fun = DECL_STRUCT_FUNCTION (fndecl);
  ASSERT_TRUE (fun != NULL);
  build_cfg (fndecl);

  convert_to_ssa (fndecl);

  verify_three_block_gimple_cfg (fun);

  /* For our trivial test function we should now have something like
     this:
       test_fn ()
       {
         <bb 2>:
         _1 = 42;
         return _1;
       }  */
  basic_block bb2 = get_real_block (fun);
  gimple *stmt_a = gimple_seq_first_stmt (bb_seq (bb2));
  ASSERT_EQ (GIMPLE_ASSIGN, gimple_code (stmt_a));
  gimple *stmt_b = stmt_a->next;
  ASSERT_EQ (GIMPLE_RETURN, gimple_code (stmt_b));
  /* The return should be the last statement in the block.  */
  ASSERT_EQ (NULL, stmt_b->next);
  greturn *return_stmt = as_a <greturn *> (stmt_b);
  /* After SSA conversion the returned value must be an SSA_NAME,
     not a plain temporary.  */
  ASSERT_EQ (SSA_NAME, TREE_CODE (gimple_return_retval (return_stmt)));
}
// Round-trip every method's IR through the editable CFG.  Building and then
// linearizing the CFG simplifies control flow; the pass reports the total
// number of opcodes eliminated as a metric.
void SimplifyCFGPass::run_pass(DexStoresVector& stores,
                               ConfigFiles& /* unused */,
                               PassManager& mgr) {
  const auto& scope = build_class_scope(stores);
  auto total_insns_removed = walk::parallel::reduce_methods<int64_t, Scope>(
      scope,
      [](DexMethod* method) -> int64_t {
        auto code = method->get_code();
        if (code == nullptr) {
          return 0;
        }
        int64_t opcodes_before = code->count_opcodes();
        // build and linearize the CFG
        code->build_cfg(/* editable */ true);
        code->clear_cfg();
        int64_t opcodes_after = code->count_opcodes();
        return opcodes_before - opcodes_after;
      },
      [](int64_t lhs, int64_t rhs) { return lhs + rhs; });
  mgr.set_metric("insns_removed", total_insns_removed);
}
// Populate the dynamic CFG for one SSA function: derive the control-flow
// graph from the GOTO body (taking unwinding into account), then gather
// assumptions from the summary's invariants and attach them.
void dynamic_cfgt::operator()(
  const ssa_local_unwindert &ssa_unwinder,
  const unwindable_local_SSAt &ssa,
  const summaryt &summary)
{
  const goto_programt &body = ssa.goto_function.body;
  build_cfg(body, ssa_unwinder);

  assumptionst collected_assumptions;
  build_from_invariants(ssa, summary, collected_assumptions);
  add_assumptions(collected_assumptions);
}
// checks if any instances of :builder that get created in the method ever get // passed to a method (aside from when its own instance methods get invoked), // or if they get stored in a field, or if they escape as a return value. bool RemoveBuildersPass::escapes_stack(DexType* builder, DexMethod* method) { always_assert(builder != nullptr); always_assert(method != nullptr); auto code = method->get_code(); code->build_cfg(); auto blocks = cfg::postorder_sort(code->cfg().blocks()); std::reverse(blocks.begin(), blocks.end()); auto regs_size = method->get_code()->get_registers_size(); auto taint_map = get_tainted_regs(regs_size, blocks, builder); return tainted_reg_escapes( builder, method, *taint_map, m_enable_buildee_constr_change); }
/* Verify that build_cfg on a trivial gimplified function produces the
   expected three-block CFG: ENTRY, one "real" block, and EXIT.  */

static void test_building_cfg ()
{
  /* Construct a trivial function, and gimplify it: */
  tree fndecl = build_trivial_high_gimple_function ();
  function *fun = DECL_STRUCT_FUNCTION (fndecl);
  ASSERT_TRUE (fun != NULL);

  /* Build a CFG.  */
  build_cfg (fndecl);

  /* The CFG-building code constructs a 4-block cfg (with
     ENTRY and EXIT):
       test_fn ()
       {
         <bb 2>:
         D.65 = 42;

         <bb 3>:
         return D.65;
       }
     and then ought to merge blocks 2 and 3 in cleanup_tree_cfg.

     Hence we should end up with a simple 3-block cfg, the two "fake" ones,
     and a "real" one:
       [ENTRY] -> [block2] -> [EXIT]
     with code like this:
       test_fn ()
       {
         <bb 2>:
         D.56 = 42;
         return D.56;
       }
     (The D.nn numbering in these dumps is illustrative and can vary.)  */
  verify_three_block_gimple_cfg (fun);

  /* Verify the statements within the "real" block.  */
  basic_block bb2 = get_real_block (fun);
  gimple *stmt_a = gimple_seq_first_stmt (bb_seq (bb2));
  ASSERT_EQ (GIMPLE_ASSIGN, gimple_code (stmt_a));
  gimple *stmt_b = stmt_a->next;
  ASSERT_EQ (GIMPLE_RETURN, gimple_code (stmt_b));
  /* The return must be the final statement in the merged block.  */
  ASSERT_EQ (NULL, stmt_b->next);
}
//============================================================================= //------------------------------PhaseCFG--------------------------------------- PhaseCFG::PhaseCFG(ResourceArea*a,RootNode*r,Matcher&m): Phase(CFG), _bbs(a), _root(r) #ifndef PRODUCT , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining")) #endif { ResourceMark rm; // I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode, // then Match it into a machine-specific Node. Then clone the machine // Node on demand. Node *x = new (C, 1) GotoNode(NULL); x->init_req(0, x); _goto = m.match_tree(x); assert(_goto != NULL, ""); _goto->set_req(0,_goto); // Build the CFG in Reverse Post Order _num_blocks = build_cfg(); _broot = _bbs[_root->_idx]; }
TEST_F(PreVerify, GeneratedClass) {
  // All four classes involved in the transformation must be present
  // in the pre-verify APK.
  auto enum_a_cls = find_class_named(classes, ENUM_A);
  EXPECT_NE(nullptr, enum_a_cls);
  auto foo_cls = find_class_named(classes, FOO);
  EXPECT_NE(nullptr, foo_cls);
  auto foo_anon_cls = find_class_named(classes, FOO_ANONYMOUS);
  EXPECT_NE(nullptr, foo_anon_cls);
  auto enum_b_cls = find_class_named(classes, ENUM_B);
  EXPECT_NE(nullptr, enum_b_cls);

  // Foo.useEnumA switches over cases {1, 2}.
  auto use_enum_a = DexMethod::get_method(
      "Lcom/facebook/redextest/Foo;.useEnumA:(Lcom/facebook/redextest/"
      "EnumA;)I");
  auto cases_a = collect_switch_cases(use_enum_a);
  std::unordered_set<size_t> expected_cases_a = {1, 2};
  EXPECT_EQ(expected_cases_a, cases_a);

  // Foo.useEnumB switches over cases {1, 2}.
  auto use_enum_b = DexMethod::get_method(
      "Lcom/facebook/redextest/Foo;.useEnumB:(Lcom/facebook/redextest/"
      "EnumB;)I");
  auto cases_b = collect_switch_cases(use_enum_b);
  std::unordered_set<size_t> expected_cases_b = {1, 2};
  EXPECT_EQ(expected_cases_b, cases_b);

  // Foo.useEnumA_again switches over cases {1, 3}; dump the CFG on failure.
  auto use_enum_a_again = DexMethod::get_method(
      "Lcom/facebook/redextest/Foo;.useEnumA_again:(Lcom/facebook/redextest/"
      "EnumA;)I");
  auto cases_a_again = collect_switch_cases(use_enum_a_again);
  std::unordered_set<size_t> expected_cases_a_again = {1, 3};
  auto code = static_cast<DexMethod*>(use_enum_a_again)->get_code();
  code->build_cfg();
  EXPECT_EQ(expected_cases_a_again, cases_a_again) << show(code->cfg());
}
/* Verify that a trivial function in gimple-SSA form can be expanded to RTL,
   and sanity-check the head of the resulting "real" basic block without
   depending on target-specific instruction selection.  */

static void test_expansion_to_rtl ()
{
  /* As above, construct a trivial function, gimplify it, build a CFG,
     and convert to SSA: */
  tree fndecl = build_trivial_high_gimple_function ();
  function *fun = DECL_STRUCT_FUNCTION (fndecl);
  ASSERT_TRUE (fun != NULL);
  build_cfg (fndecl);
  convert_to_ssa (fndecl);

  /* We need a cgraph_node for it.  */
  cgraph_node::get_create (fndecl);

  /* Normally, cgraph_node::expand () would call init_function_start (and a
     bunch of other stuff), and invoke the expand pass, but it also runs all
     of the other passes.  So just do the minimum needed to get from
     gimple-SSA to RTL.  */
  rtl_opt_pass *expand_pass = make_pass_expand (g);
  push_cfun (fun);
  init_function_start (fndecl);
  expand_pass->execute (fun);
  pop_cfun ();

  /* The exact RTL is target-dependent.  On x86_64, I get this:
       (note 3 1 2 2 [bb 2] NOTE_INSN_BASIC_BLOCK)
       (note 2 3 5 2 NOTE_INSN_FUNCTION_BEG)
       (insn 5 2 6 2 (set (reg:SI 87 [ D.59 ])
               (const_int 42 [0x2a])) -1 (nil))
       (insn 6 5 10 2 (set (reg:SI 88 [ <retval> ])
               (reg:SI 87 [ D.59 ])) -1 (nil))
       (insn 10 6 11 2 (set (reg/i:SI 0 ax)
               (reg:SI 88 [ <retval> ])) -1 (nil))
       (insn 11 10 0 2 (use (reg/i:SI 0 ax)) -1 (nil))

     On cr16-elf I get this:
       (note 4 1 2 2 [bb 2] NOTE_INSN_BASIC_BLOCK)
       (insn 2 4 3 2 (set (reg:SI 24)
               (reg/f:SI 16 virtual-incoming-args)) -1 (nil))
       (note 3 2 6 2 NOTE_INSN_FUNCTION_BEG)
       (insn 6 3 7 2 (set (reg:HI 22 [ _1 ])
               (const_int 42 [0x2a])) -1 (nil))
       (insn 7 6 11 2 (set (reg:HI 23 [ <retval> ])
               (reg:HI 22 [ _1 ])) -1 (nil))
       (insn 11 7 12 2 (set (reg/i:HI 0 r0)
               (reg:HI 23 [ <retval> ])) -1 (nil))
       (insn 12 11 0 2 (use (reg/i:HI 0 r0)) -1 (nil)).  */
  verify_three_block_rtl_cfg (fun);

  /* Verify as much of the RTL as we can whilst avoiding
     target-specific behavior.  */
  basic_block bb2 = get_real_block (fun);

  /* Expect a NOTE_INSN_BASIC_BLOCK at the head of the block...  */
  rtx_insn *insn = BB_HEAD (bb2);
  ASSERT_TRUE (insn != NULL);
  ASSERT_EQ (NOTE, insn->code);
  ASSERT_EQ (NOTE_INSN_BASIC_BLOCK, NOTE_KIND (insn));
  ASSERT_EQ (bb2, NOTE_BASIC_BLOCK (insn));

  /* ...etc; any further checks are likely to over-specify things and
     run us into target dependencies.  */
}
/* imc_reg_alloc is the main loop of the allocation algorithm. It operates
 * on a single compilation unit at a time.
 *
 * Outline: after pre-optimization, basic blocks and the CFG are built and
 * CFG-level optimizations applied until a fixpoint; then dominators, loops,
 * the register list and life ranges are computed (again to a fixpoint when
 * the optimizer changes code); finally registers are colored via the
 * interference graph, spilling until an allocation succeeds.
 */
void
imc_reg_alloc(struct Parrot_Interp *interpreter, IMC_Unit * unit)
{
    int to_spill;
    int todo, first;

    if (!unit)
        return;
    if (!optimizer_level && pasm_file)
        return;
    init_tables(interpreter);
    allocated = 0;

#if IMC_TRACE
    fprintf(stderr, "reg_alloc.c: imc_reg_alloc\n");
    if (unit->instructions->r[1] && unit->instructions->r[1]->pcc_sub) {
        fprintf(stderr, "img_reg_alloc: pcc_sub (nargs = %d)\n",
                unit->instructions->r[1]->pcc_sub->nargs);
    }
#endif

    debug(interpreter, DEBUG_IMC, "\n------------------------\n");
    debug(interpreter, DEBUG_IMC, "processing sub %s\n", function);
    debug(interpreter, DEBUG_IMC, "------------------------\n\n");
    if (IMCC_INFO(interpreter)->verbose ||
            (IMCC_INFO(interpreter)->debug & DEBUG_IMC))
        imc_stat_init(unit);

    /* consecutive labels, if_branch, unused_labels ... */
    pre_optimize(interpreter, unit);
    if (optimizer_level == OPT_PRE && pasm_file)
        return;

    nodeStack = imcstack_new();
    unit->n_spilled = 0;

    /* Build blocks/CFG and run CFG optimizations until nothing changes. */
    todo = first = 1;
    while (todo) {
        find_basic_blocks(interpreter, unit, first);
        build_cfg(interpreter, unit);
        if (first && (IMCC_INFO(interpreter)->debug & DEBUG_CFG))
            dump_cfg(unit);
        first = 0;
        todo = cfg_optimize(interpreter, unit);
    }

    /* Compute dominators, loops, the register list and life ranges;
       repeat while the optimizer keeps changing the code. */
    todo = first = 1;
    while (todo) {
        if (!first) {
            /* The code changed: rebuild blocks and the CFG. */
            find_basic_blocks(interpreter, unit, 0);
            build_cfg(interpreter, unit);
        }
        first = 0;
        compute_dominators(interpreter, unit);
        find_loops(interpreter, unit);
        build_reglist(interpreter, unit);
        life_analysis(interpreter, unit);
        /* optimize, as long as there is something to do */
        if (dont_optimize)
            todo = 0;
        else {
            todo = optimize(interpreter, unit);
            if (todo)
                pre_optimize(interpreter, unit);
        }
    }

    /* Color the interference graph, spilling until allocation succeeds. */
    todo = 1;
#if !DOIT_AGAIN_SAM
    build_interference_graph(interpreter, unit);
#endif
    while (todo) {
#if DOIT_AGAIN_SAM
        build_interference_graph(interpreter, unit);
#endif
        if (optimizer_level & OPT_SUB)
            allocate_wanted_regs(unit);
        compute_spilling_costs(interpreter, unit);
#ifdef DO_SIMPLIFY
        /* simplify until no changes can be made */
        while (simplify(unit)) {}
#endif
        order_spilling(unit);  /* put the remaining items on stack */

        to_spill = try_allocate(interpreter, unit);
        allocated = 1;

        if ( to_spill >= 0 ) {
            allocated = 0;
            spill(interpreter, unit, to_spill);
            /*
             * build the new cfg/reglist on the fly in spill() and
             * do life analysis there for only the involved regs
             */
#if DOIT_AGAIN_SAM
            find_basic_blocks(interpreter, unit, 0);
            build_cfg(interpreter, unit);
            build_reglist(interpreter, unit);
            /* BUGFIX: life_analysis takes (interpreter, unit); the old
             * conditionally-compiled call passed only the interpreter. */
            life_analysis(interpreter, unit);
#endif
        }
        else {
            /* the process is finished */
            todo = 0;
        }
    }

    if (optimizer_level & OPT_SUB)
        sub_optimize(interpreter, unit);
    if (IMCC_INFO(interpreter)->debug & DEBUG_IMC)
        dump_instructions(unit);
    if (IMCC_INFO(interpreter)->verbose ||
            (IMCC_INFO(interpreter)->debug & DEBUG_IMC))
        print_stat(interpreter, unit);
    imcstack_free(nodeStack);
}