/* Output basic block BB of function FN to output block OB.  Writes the
   block header record, then (for non-empty blocks) the statement list and
   the PHI list, each terminated by a zero record.  */
void output_bb (struct output_block *ob, basic_block bb, struct function *fn)
{
  gimple_stmt_iterator bsi = gsi_start_bb (bb);

  /* LTO_bb1 tags a block that carries statements or PHIs; LTO_bb0 tags an
     empty block, for which no statement section follows.  */
  streamer_write_record_start (ob,
			       (!gsi_end_p (bsi)) || phi_nodes (bb)
			       ? LTO_bb1
			       : LTO_bb0);

  streamer_write_uhwi (ob, bb->index);
  streamer_write_gcov_count (ob, bb->count);
  streamer_write_hwi (ob, bb->frequency);
  streamer_write_hwi (ob, bb->flags);

  if (!gsi_end_p (bsi) || phi_nodes (bb))
    {
      /* Output the statements.  The list of statements is terminated with
	 a zero.  */
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  int region;
	  gimple stmt = gsi_stmt (bsi);

	  output_gimple_stmt (ob, stmt);

	  /* Emit the EH region holding STMT.  A region of zero means the
	     statement is in no EH region, signalled with LTO_null so the
	     reader can tell the cases apart.  */
	  region = lookup_stmt_eh_lp_fn (fn, stmt);
	  if (region != 0)
	    {
	      streamer_write_record_start (ob, LTO_eh_region);
	      streamer_write_hwi (ob, region);
	    }
	  else
	    streamer_write_record_start (ob, LTO_null);
	}

      streamer_write_record_start (ob, LTO_null);

      for (gphi_iterator psi = gsi_start_phis (bb);
	   !gsi_end_p (psi);
	   gsi_next (&psi))
	{
	  gphi *phi = psi.phi ();

	  /* Only emit PHIs for gimple registers.  PHI nodes for .MEM
	     will be filled in on reading when the SSA form is
	     updated.  */
	  if (!virtual_operand_p (gimple_phi_result (phi)))
	    output_phi (ob, phi);
	}

      streamer_write_record_start (ob, LTO_null);
    }
}
/* Stackleak plugin pass body: insert a stack-overflow check before and a
   tracking call after every __builtin_alloca in the current function,
   remembering whether the entry block got instrumented and whether the
   function is a leaf (makes no calls).
   NOTE(review): this excerpt is truncated — the function body continues
   past the visible chunk (no closing brace or return is shown here).  */
static unsigned int execute_stackleak_tree_instrument(void)
{
	basic_block bb, entry_bb;
	bool prologue_instrumented = false, is_leaf = true;

	/* First real block after the artificial ENTRY block.  */
	entry_bb = ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb;

	// 1. loop through BBs and GIMPLE statements
	FOR_EACH_BB_FN(bb, cfun) {
		gimple_stmt_iterator gsi;

		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
			gimple stmt;

			stmt = gsi_stmt(gsi);

			/* Any call at all makes this a non-leaf function.  */
			if (is_gimple_call(stmt))
				is_leaf = false;

			// gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
			if (!is_alloca(stmt))
				continue;

			// 2. insert stack overflow check before each __builtin_alloca call
			stackleak_check_alloca(&gsi);

			// 3. insert track call after each __builtin_alloca call
			stackleak_add_instrumentation(&gsi);

			/* An alloca in the entry block means the prologue is
			   already covered.  */
			if (bb == entry_bb)
				prologue_instrumented = true;
		}
	}
void cgraph_rebuild_references (void) { basic_block bb; struct cgraph_node *node = cgraph_get_node (current_function_decl); gimple_stmt_iterator gsi; struct ipa_ref *ref; int i; /* Keep speculative references for further cgraph edge expansion. */ for (i = 0; ipa_ref_list_reference_iterate (&node->ref_list, i, ref);) if (!ref->speculative) ipa_remove_reference (ref); else i++; node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count; FOR_EACH_BB_FN (bb, cfun) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) ipa_record_stmt_references (node, gsi_stmt (gsi)); for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) ipa_record_stmt_references (node, gsi_stmt (gsi)); } record_eh_tables (node, cfun); }
/* Gimple pass callback: feed every statement of CFUN to read_stmt, then
   walk the natural loop tree recursively.  Always returns 0 (no TODO
   flags).
   NOTE(review): `context' is not declared in this excerpt — presumably a
   file-scope variable echoed through the diagnostic; confirm.  */
unsigned int pass_generic()
{
    warning(0, "%<%s%>", context);

    basic_block bb;
    gimple_stmt_iterator gsi;

    /* fprintf( stderr, "* MYPROOF on %s()\n", IDENTIFIER_POINTER(DECL_NAME(cfun->decl)) ); */

    FOR_EACH_BB( bb )
	{
	    /* fprintf( stderr, " ** BB %d\n", bb->index ); */
	    for ( gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi) )
		{
		    /* print_gimple_stmt ( stdout, gsi_stmt(gsi), 0, 0 ); */
		    read_stmt( gsi_stmt(gsi) );
		}
	}

    /* recursively read loops */
    if ( cfun->x_current_loops != NULL )
	{
	    read_loop( cfun->x_current_loops->tree_root );
	}

    return 0;
}
unsigned int pass_variable() { warning(0, "%<%s%>", context); const char *identifier = IDENTIFIER_POINTER(DECL_NAME(cfun->decl)); t_myproof_function *function = mylist_find( g_myproof_pass->functions, function_exists, (void*)identifier ); if ( function == NULL ) { fprintf( stderr, "myproof: pass_variable: unhandled \'%s\' function\n", identifier ); return 0; } basic_block bb; gimple_stmt_iterator gsi; FOR_EACH_BB( bb ) { for ( gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi) ) { read_stmt( gsi_stmt(gsi), function ); } } if ( cfun->x_current_loops != NULL ) { read_loop( cfun->x_current_loops->tree_root, function ); } return 0; }
/* Return the number of non-call GIMPLE statements in the body of the
   function named NAME, scanning the whole cgraph.
   NOTE(review): excerpt is truncated — the outer loop and function are not
   closed here and no return is visible for the "name not found" path;
   confirm the full version returns a value on every path (falling off the
   end of a non-void function is undefined behavior).  */
int getSize(char *name)
{
	struct cgraph_node *node;
	basic_block bb;
	gimple statement;
	enum gimple_code code;
	gimple_stmt_iterator gsi;
	int size = 0;

	for (node = cgraph_nodes; node; node = node->next) {
		/* Nodes without a body, and clone nodes are not interesting. */
		if (!gimple_has_body_p(node->decl) || node->clone_of)
			continue;

		if (strcmp(cgraph_node_name(node), name) == 0) {
			/* Make NODE's body the current function so the CFG
			   walking macros below operate on it.  */
			set_cfun(DECL_STRUCT_FUNCTION(node->decl));
			//fprintf(stderr, "%s --> ", cgraph_node_name(node));
			FOR_ALL_BB(bb) {
				for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
					statement = gsi_stmt(gsi);
					code = gimple_code(statement);
					//debug_gimple_stmt(statement);
					/* Count everything except calls. */
					if (code != GIMPLE_CALL) {
						size++;
					}
				}
			}
			return size;
		}
	}
/* Walk FN's body and flag every function it calls (whose struct function
   is available) as having a caller that may not allocate a frame.  Does
   nothing when FN has no CFG.  */
static void
set_callers_may_not_allocate_frame (function *fn)
{
  basic_block block;
  gimple_stmt_iterator it;

  if (fn->cfg == NULL)
    return;

  FOR_EACH_BB_FN (block, fn)
    for (it = gsi_start_bb (block); !gsi_end_p (it); gsi_next (&it))
      {
	gimple *call = gsi_stmt (it);

	if (!is_gimple_call (call))
	  continue;

	/* Indirect calls have no fndecl; skip them.  */
	tree decl = gimple_call_fndecl (call);
	if (decl == NULL)
	  continue;

	function *callee = DECL_STRUCT_FUNCTION (decl);
	if (callee != NULL)
	  callee->machine->callers_may_not_allocate_frame = true;
      }

  return;
}
void cgraph_rebuild_references (void) { basic_block bb; struct cgraph_node *node = cgraph_get_node (current_function_decl); gimple_stmt_iterator gsi; ipa_remove_all_references (&node->ref_list); node->count = ENTRY_BLOCK_PTR->count; FOR_EACH_BB (bb) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); walk_stmt_load_store_addr_ops (stmt, node, mark_load, mark_store, mark_address); } for (gsi = gsi_start (phi_nodes (bb)); !gsi_end_p (gsi); gsi_next (&gsi)) walk_stmt_load_store_addr_ops (gsi_stmt (gsi), node, mark_load, mark_store, mark_address); } record_eh_tables (node, cfun); }
/* Expand the UBSAN_BOUNDS internal call pointed to by GSI into an explicit
   bounds check: a conditional branch into a diagnostic (or trap) block,
   after which the internal call itself is removed.  On return *GSI points
   at the start of the fall-through block.  */
void
ubsan_expand_bounds_ifn (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  gcc_assert (gimple_call_num_args (stmt) == 3);

  /* Pick up the arguments of the UBSAN_BOUNDS call.  */
  tree type = TREE_TYPE (TREE_TYPE (gimple_call_arg (stmt, 0)));
  tree index = gimple_call_arg (stmt, 1);
  tree orig_index_type = TREE_TYPE (index);
  tree bound = gimple_call_arg (stmt, 2);

  /* Remember where the UBSAN_BOUNDS call sits so it can be deleted after
     the CFG has been reshaped around it.  */
  gimple_stmt_iterator gsi_orig = *gsi;

  /* Create condition "if (index > bound)".  */
  basic_block then_bb, fallthru_bb;
  gimple_stmt_iterator cond_insert_point
    = create_cond_insert_point (gsi, 0/*before_p*/, false, true,
				&then_bb, &fallthru_bb);
  index = fold_convert (TREE_TYPE (bound), index);
  /* Force INDEX into a form usable as a gimple operand at the insert
     point.  */
  index = force_gimple_operand_gsi (&cond_insert_point, index,
				    true/*simple_p*/, NULL_TREE,
				    false/*before*/, GSI_NEW_STMT);
  gimple g = gimple_build_cond (GT_EXPR, index, bound, NULL_TREE, NULL_TREE);
  gimple_set_location (g, loc);
  gsi_insert_after (&cond_insert_point, g, GSI_NEW_STMT);

  /* Generate __ubsan_handle_out_of_bounds call.  */
  *gsi = gsi_after_labels (then_bb);
  if (flag_sanitize_undefined_trap_on_error)
    /* With -fsanitize-undefined-trap-on-error, emit a bare trap instead
       of the diagnostic runtime call.  */
    g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  else
    {
      tree data
	= ubsan_create_data ("__ubsan_out_of_bounds_data", &loc, NULL,
			     ubsan_type_descriptor (type, UBSAN_PRINT_ARRAY),
			     ubsan_type_descriptor (orig_index_type),
			     NULL_TREE);
      data = build_fold_addr_expr_loc (loc, data);
      /* The _ABORT variant is used when recovery is disabled.  */
      enum built_in_function bcode
	= flag_sanitize_recover
	  ? BUILT_IN_UBSAN_HANDLE_OUT_OF_BOUNDS
	  : BUILT_IN_UBSAN_HANDLE_OUT_OF_BOUNDS_ABORT;
      tree fn = builtin_decl_explicit (bcode);
      tree val = force_gimple_operand_gsi (gsi, ubsan_encode_value (index),
					   true, NULL_TREE, true,
					   GSI_SAME_STMT);
      g = gimple_build_call (fn, 2, data, val);
    }
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Get rid of the UBSAN_BOUNDS call from the IR.  */
  unlink_stmt_vdef (stmt);
  gsi_remove (&gsi_orig, true);

  /* Point GSI to next logical statement.  */
  *gsi = gsi_start_bb (fallthru_bb);
}
/* NRV pass body: for every call whose aggregate result is assigned to an
   lvalue, decide whether the callee may construct its return value
   directly in that destination (the "return slot" optimization) and
   record the verdict on the call.
   NOTE(review): excerpt is truncated — the closing brace and the pass's
   return value lie outside this view.  */
static unsigned int
execute_return_slot_opt (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  bool slot_opt_p;

	  /* Only calls with an aggregate-typed lhs that are not already
	     flagged are candidates.  */
	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt)
	      && !gimple_call_return_slot_opt_p (stmt)
	      && aggregate_value_p (TREE_TYPE (gimple_call_lhs (stmt)),
				    gimple_call_fndecl (stmt)))
	    {
	      /* Check if the location being assigned to is clobbered by
		 the call.  */
	      slot_opt_p = dest_safe_for_nrv_p (stmt);
	      gimple_call_set_return_slot_opt (stmt, slot_opt_p);
	    }
	}
    }
void cgraph_edge::rebuild_references (void) { basic_block bb; cgraph_node *node = cgraph_node::get (current_function_decl); gimple_stmt_iterator gsi; ipa_ref *ref = NULL; int i; /* Keep speculative references for further cgraph edge expansion. */ for (i = 0; node->iterate_reference (i, ref);) if (!ref->speculative) ref->remove_reference (); else i++; node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count; FOR_EACH_BB_FN (bb, cfun) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) node->record_stmt_references (gsi_stmt (gsi)); for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) node->record_stmt_references (gsi_stmt (gsi)); } record_eh_tables (node, cfun); if (node->instrumented_version && !node->instrumentation_clone) node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL); }
/* Driver for -fsanitize=null instrumentation: walk every statement and
   instrument pointer stores and loads.  Debug statements and clobbers are
   skipped.  Always returns 0.  */
static unsigned int
ubsan_pass (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;

  FOR_EACH_BB (bb)
    {
      gsi = gsi_start_bb (bb);
      while (!gsi_end_p (gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  /* Debug statements and clobbers never need instrumentation.  */
	  if (!is_gimple_debug (stmt)
	      && !gimple_clobber_p (stmt)
	      && (flag_sanitize & SANITIZE_NULL))
	    {
	      /* Stores instrument the written-through pointer, loads the
		 read-through one.  */
	      if (gimple_store_p (stmt))
		instrument_null (gsi, true);
	      if (gimple_assign_load_p (stmt))
		instrument_null (gsi, false);
	    }

	  gsi_next (&gsi);
	}
    }

  return 0;
}
/*
 * find all asm() stmts that clobber r10 and add a reload of r10
 */
static unsigned int execute_kernexec_reload(void)
{
	basic_block bb;

	// 1. loop through BBs and GIMPLE statements
	FOR_EACH_BB(bb) {
		gimple_stmt_iterator gsi;

		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
			// gimple match: __asm__ ("" : : : "r10");
			gimple asm_stmt;
			size_t nclobbers;

			// is it an asm ...
			asm_stmt = gsi_stmt(gsi);
			if (gimple_code(asm_stmt) != GIMPLE_ASM)
				continue;

			// ... clobbering r10
			nclobbers = gimple_asm_nclobbers(asm_stmt);
			while (nclobbers--) {
				tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
				/* strcmp returns 0 on match, so non-zero means
				   this clobber is not "r10".  */
				if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
					continue;
				/* Insert the reload; one per asm is enough, so
				   stop scanning the remaining clobbers.  */
				kernexec_reload_fptr_mask(&gsi);
				//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
				break;
			}
		}
	}

	return 0;
}
/* Run the vectorizer pattern recognizers over every statement of the loop
   described by LOOP_VINFO.  Each statement is offered to all NUM_PATTERNS
   recognition functions in order.  */
void
vect_pattern_recog (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int bb_idx, pat;
  gimple (* recog_fn) (gimple, tree *, tree *);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_pattern_recog ===");

  /* Scan through the loop stmts, applying the pattern recognition
     functions starting at each stmt visited.  */
  for (bb_idx = 0; bb_idx < nbbs; bb_idx++)
    for (si = gsi_start_bb (bbs[bb_idx]); !gsi_end_p (si); gsi_next (&si))
      {
	/* Offer the statement to every generic vect_recog_xxx_pattern
	   function in turn.  */
	for (pat = 0; pat < NUM_PATTERNS; pat++)
	  {
	    recog_fn = vect_vect_recog_func_ptrs[pat];
	    vect_pattern_recog_1 (recog_fn, si);
	  }
      }
}
/* Return true iff every non-debug statement in BB is free of side effects,
   cannot trap, uses no undefined values, has no virtual use, and is not a
   call.  */
static bool
bb_no_side_effects_p (basic_block bb)
{
  for (gimple_stmt_iterator it = gsi_start_bb (bb); !gsi_end_p (it);
       gsi_next (&it))
    {
      gimple *stmt = gsi_stmt (it);

      /* Debug statements never count as side effects.  */
      if (is_gimple_debug (stmt))
	continue;

      bool unsafe = (gimple_has_side_effects (stmt)
		     || gimple_uses_undefined_value_p (stmt)
		     || gimple_could_trap_p (stmt)
		     || gimple_vuse (stmt)
		     /* const calls don't match any of the above, yet they
			could still have some side-effects - they could
			contain gimple_could_trap_p statements, like floating
			point exceptions or integer division by zero.  See
			PR70586.  FIXME: perhaps gimple_has_side_effects or
			gimple_could_trap_p should handle this.  */
		     || is_gimple_call (stmt));
      if (unsafe)
	return false;
    }

  return true;
}
/* Record into LIVEOUTS the names defined in REGION that BB uses: PHI
   arguments carried along BB's outgoing edges, plus all SSA uses of BB's
   own non-debug statements.  */
static void
sese_build_liveouts_bb (sese region, bitmap liveouts, basic_block bb)
{
  edge succ;
  edge_iterator ei;
  ssa_op_iter op_it;
  use_operand_p use_p;

  /* A PHI argument flowing over a successor edge counts as a use in BB.  */
  FOR_EACH_EDGE (succ, ei, bb->succs)
    {
      gphi_iterator pi;
      for (pi = gsi_start_phis (succ->dest); !gsi_end_p (pi); gsi_next (&pi))
	sese_build_liveouts_use (region, liveouts, bb,
				 PHI_ARG_DEF_FROM_EDGE (pi.phi (), succ));
    }

  for (gimple_stmt_iterator it = gsi_start_bb (bb); !gsi_end_p (it);
       gsi_next (&it))
    {
      gimple *stmt = gsi_stmt (it);

      /* Debug statements do not keep names live.  */
      if (is_gimple_debug (stmt))
	continue;

      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, op_it, SSA_OP_ALL_USES)
	sese_build_liveouts_use (region, liveouts, bb, USE_FROM_PTR (use_p));
    }
}
/* Annotate BB's count from the AutoFDO source profile.  PROMOTED is the
   set of statements that already had indirect-call promotion applied, so
   they are not promoted again here.  Returns true iff at least one
   statement in BB carried profile information; in that case BB's count is
   set to the largest per-statement count seen, and all statement, PHI and
   outgoing-edge locations are marked as annotated.  */
static bool
afdo_set_bb_count (basic_block bb, const stmt_set &promoted)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;
  gcov_type max_count = 0;
  bool has_annotated = false;

  /* First pass: find the maximum statement count and perform value
     profile transformation on statements with known call targets.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      count_info info;
      gimple *stmt = gsi_stmt (gsi);

      /* Clobbers and debug statements carry no profile data.  */
      if (gimple_clobber_p (stmt) || is_gimple_debug (stmt))
	continue;

      if (afdo_source_profile->get_count_info (stmt, &info))
	{
	  if (info.count > max_count)
	    max_count = info.count;
	  has_annotated = true;
	  /* Promote indirect calls with recorded targets, unless this
	     statement was promoted earlier.  */
	  if (info.targets.size () > 0
	      && promoted.find (stmt) == promoted.end ())
	    afdo_vpt (&gsi, info.targets, false);
	}
    }

  if (!has_annotated)
    return false;

  /* Second pass: mark every location in the block as annotated so it is
     not re-annotated later.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    afdo_source_profile->mark_annotated (gimple_location (gsi_stmt (gsi)));

  for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi);
       gsi_next (&gpi))
    {
      gphi *phi = gpi.phi ();
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
	afdo_source_profile->mark_annotated (gimple_phi_arg_location (phi, i));
    }

  FOR_EACH_EDGE (e, ei, bb->succs)
    afdo_source_profile->mark_annotated (e->goto_locus);

  bb->count = profile_count::from_gcov_type (max_count).afdo ();
  return true;
}
/* Read a basic block with tag TAG for function FN from input block IB.
   DATA_IN holds the streaming context; COUNT_MATERIALIZATION_SCALE
   rescales the streamed execution count.  Statements and PHIs follow the
   header, each list zero-terminated, with optional EH-region records
   interleaved after the statements.  */
void
input_bb (struct lto_input_block *ib, enum LTO_tags tag,
	  struct data_in *data_in, struct function *fn,
	  int count_materialization_scale)
{
  unsigned int index;
  basic_block bb;
  gimple_stmt_iterator bsi;

  /* This routine assumes that CFUN is set to FN, as it needs to call
     basic GIMPLE routines that use CFUN.  */
  gcc_assert (cfun == fn);

  index = streamer_read_uhwi (ib);
  bb = BASIC_BLOCK_FOR_FUNCTION (fn, index);

  /* Rescale the streamed count; adding REG_BR_PROB_BASE / 2 rounds to
     nearest rather than truncating.  */
  bb->count = (streamer_read_hwi (ib) * count_materialization_scale
	       + REG_BR_PROB_BASE / 2) / REG_BR_PROB_BASE;
  bb->loop_depth = streamer_read_hwi (ib);
  bb->frequency = streamer_read_hwi (ib);
  bb->flags = streamer_read_hwi (ib);

  /* LTO_bb1 has statements.  LTO_bb0 does not.  */
  if (tag == LTO_bb0)
    return;

  bsi = gsi_start_bb (bb);
  tag = streamer_read_record_start (ib);
  while (tag)
    {
      gimple stmt = input_gimple_stmt (ib, data_in, fn, tag);
      if (!is_gimple_debug (stmt))
	find_referenced_vars_in (stmt);
      gsi_insert_after (&bsi, stmt, GSI_NEW_STMT);

      /* After the statement, expect a 0 delimiter or the EH region
	 that the previous statement belongs to.  */
      tag = streamer_read_record_start (ib);
      lto_tag_check_set (tag, 2, LTO_eh_region, LTO_null);

      if (tag == LTO_eh_region)
	{
	  HOST_WIDE_INT region = streamer_read_hwi (ib);
	  /* The region number must fit in an int.  */
	  gcc_assert (region == (int) region);
	  add_stmt_to_eh_lp (stmt, region);
	}

      tag = streamer_read_record_start (ib);
    }

  /* The PHI list is likewise zero-terminated.  */
  tag = streamer_read_record_start (ib);
  while (tag)
    {
      gimple phi = input_phi (ib, bb, data_in, fn);
      find_referenced_vars_in (phi);
      tag = streamer_read_record_start (ib);
    }
}
/* Check whether we should duplicate HEADER of LOOP.  At most *LIMIT
   instructions may be duplicated; *LIMIT is decremented by the cost of
   each statement counted.  Returns true iff copying the header looks
   both possible and profitable.  */
static bool
should_duplicate_loop_header_p (basic_block header, struct loop *loop,
				int *limit)
{
  gimple_stmt_iterator bsi;
  gimple last;

  /* Do not copy one block more than once (we do not really want to do
     loop peeling here).  */
  if (header->aux)
    return false;

  /* Loop header copying usually increases size of the code.  This used
     not to be true, since quite often it is possible to verify that the
     condition is satisfied in the first iteration and therefore to
     eliminate it.  Jump threading handles these cases now.  */
  if (optimize_loop_for_size_p (loop))
    return false;

  gcc_assert (EDGE_COUNT (header->succs) > 0);
  if (single_succ_p (header))
    return false;
  if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest)
      && flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 1)->dest))
    return false;

  /* If this is not the original loop header, we want it to have just
     one predecessor in order to match the && pattern.  */
  if (header != loop->header && !single_pred_p (header))
    return false;

  /* A block with multiple successors normally ends in a control
     statement, but be defensive: last_stmt can return NULL, and
     gimple_code would then dereference a null pointer.  */
  last = last_stmt (header);
  if (last == NULL || gimple_code (last) != GIMPLE_COND)
    return false;

  /* Approximately copy the conditions that used to be used in jump.c --
     at most 20 insns and no calls.  */
  for (bsi = gsi_start_bb (header); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      last = gsi_stmt (bsi);

      /* Labels and debug statements cost nothing.  */
      if (gimple_code (last) == GIMPLE_LABEL)
	continue;

      if (is_gimple_debug (last))
	continue;

      /* Calls make header duplication unprofitable.  */
      if (is_gimple_call (last))
	return false;

      *limit -= estimate_num_insns (last, &eni_size_weights);
      if (*limit < 0)
	return false;
    }

  return true;
}
/* Replace GOMP_SIMD_* internal-function calls in CFUN with the constants
   they evaluate to once the vectorization factor is known.  HTAB maps a
   simduid to its chosen vectorization factor; a missing entry means the
   loop was not vectorized and VF 1 applies.
   NOTE(review): excerpt is truncated — the function's closing brace lies
   outside this view.  */
static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  unsigned int vf = 1;
	  enum internal_fn ifn;
	  gimple stmt = gsi_stmt (i);
	  tree t;

	  /* Only GOMP_SIMD_* internal calls are of interest.  */
	  if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
	    continue;
	  ifn = gimple_call_internal_fn (stmt);
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_LANE:
	    case IFN_GOMP_SIMD_VF:
	    case IFN_GOMP_SIMD_LAST_LANE:
	      break;
	    default:
	      continue;
	    }

	  /* The first argument is always the simduid SSA name.  */
	  tree arg = gimple_call_arg (stmt, 0);
	  gcc_assert (arg != NULL_TREE);
	  gcc_assert (TREE_CODE (arg) == SSA_NAME);
	  simduid_to_vf *p = NULL, data;
	  data.simduid = DECL_UID (SSA_NAME_VAR (arg));
	  if (htab)
	    {
	      p = htab->find (&data);
	      if (p)
		vf = p->vf;
	    }

	  /* Fold the call to the value it has under vectorization factor
	     VF.  */
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_VF:
	      t = build_int_cst (unsigned_type_node, vf);
	      break;
	    case IFN_GOMP_SIMD_LANE:
	      t = build_int_cst (unsigned_type_node, 0);
	      break;
	    case IFN_GOMP_SIMD_LAST_LANE:
	      t = gimple_call_arg (stmt, 1);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  update_call_from_tree (&i, t);
	}
    }
/* Generate code for PARTITION of LOOP: remove from (a copy of) the loop
   every PHI and statement whose index in the stmts_from_loop ordering is
   not set in the PARTITION bitmap.  When COPY_P, operate on a fresh copy
   of LOOP placed before it.  Returns false if the loop (copy) is
   unavailable, true on success.  */
static bool
generate_loops_for_partition (struct loop *loop, bitmap partition,
			      bool copy_p)
{
  unsigned i, x;
  gimple_stmt_iterator bsi;
  basic_block *bbs;

  if (copy_p)
    {
      loop = copy_loop_before (loop);
      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }

  if (loop == NULL)
    return false;

  /* Remove stmts not in the PARTITION bitmap.  The order in which we
     visit the phi nodes and the statements is exactly as in
     stmts_from_loop.  */
  bbs = get_loop_body_in_dom_order (loop);
  for (x = 0, i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* X indexes PHIs and statements in stmts_from_loop order.  */
      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
	if (!bitmap_bit_p (partition, x++))
	  {
	    gimple phi = gsi_stmt (bsi);
	    /* A virtual PHI cannot simply vanish; schedule the virtual
	       SSA web for renaming instead.  */
	    if (!is_gimple_reg (gimple_phi_result (phi)))
	      mark_virtual_phi_result_for_renaming (phi);
	    remove_phi_node (&bsi, true);
	  }
	else
	  gsi_next (&bsi);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
	{
	  gimple stmt = gsi_stmt (bsi);
	  /* Labels are never removed and do not consume a bitmap
	     index.  */
	  if (gimple_code (gsi_stmt (bsi)) != GIMPLE_LABEL
	      && !bitmap_bit_p (partition, x++))
	    {
	      unlink_stmt_vdef (stmt);
	      gsi_remove (&bsi, true);
	      release_defs (stmt);
	    }
	  else
	    gsi_next (&bsi);
	}
    }

  free (bbs);
  return true;
}
/* Dump SCoP statistics for SCOP to FILE: static counts of basic blocks,
   loops, conditions and statements inside the SCoP region, followed by
   the same counts weighted by profile (bb->count).  */
static void
print_graphite_scop_statistics (FILE* file, scop_p scop)
{
  long n_bbs = 0, n_loops = 0, n_stmts = 0, n_conditions = 0;
  long n_p_bbs = 0, n_p_loops = 0, n_p_stmts = 0, n_p_conditions = 0;
  basic_block bb;

  FOR_ALL_BB (bb)
    {
      gimple_stmt_iterator psi;
      loop_p loop = bb->loop_father;

      /* Only blocks inside the SCoP region contribute.  */
      if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
	continue;

      n_bbs++;
      n_p_bbs += bb->count;

      /* More than one successor means the block ends in a condition.  */
      if (EDGE_COUNT (bb->succs) > 1)
	{
	  n_conditions++;
	  n_p_conditions += bb->count;
	}

      for (psi = gsi_start_bb (bb); !gsi_end_p (psi); gsi_next (&psi))
	{
	  n_stmts++;
	  n_p_stmts += bb->count;
	}

      /* Each loop is counted once, at its header block.  */
      if (loop->header == bb && loop_in_sese_p (loop, SCOP_REGION (scop)))
	{
	  n_loops++;
	  n_p_loops += bb->count;
	}
    }

  fprintf (file, "\nSCoP statistics (");
  fprintf (file, "BBS:%ld, ", n_bbs);
  fprintf (file, "LOOPS:%ld, ", n_loops);
  fprintf (file, "CONDITIONS:%ld, ", n_conditions);
  fprintf (file, "STMTS:%ld)\n", n_stmts);
  fprintf (file, "\nSCoP profiling statistics (");
  fprintf (file, "BBS:%ld, ", n_p_bbs);
  fprintf (file, "LOOPS:%ld, ", n_p_loops);
  fprintf (file, "CONDITIONS:%ld, ", n_p_conditions);
  fprintf (file, "STMTS:%ld)\n", n_p_stmts);
}
/* Return true iff BB contains nothing but debug statements.  */
static bool
trivially_empty_bb_p (basic_block bb)
{
  gimple_stmt_iterator it;

  for (it = gsi_start_bb (bb); !gsi_end_p (it); gsi_next (&it))
    {
      /* Any non-debug statement makes the block non-empty.  */
      if (gimple_code (gsi_stmt (it)) != GIMPLE_DEBUG)
	return false;
    }

  return true;
}
/* Return the first statement of BB that is not "simple" for SCoP
   detection (relative to SCOP_ENTRY and OUTER_LOOP), or NULL when every
   statement qualifies.  */
static gimple
harmful_stmt_in_bb (basic_block scop_entry, loop_p outer_loop,
		    basic_block bb)
{
  gimple_stmt_iterator it;

  for (it = gsi_start_bb (bb); !gsi_end_p (it); gsi_next (&it))
    {
      gimple stmt = gsi_stmt (it);
      if (!stmt_simple_for_scop_p (scop_entry, outer_loop, stmt, bb))
	return stmt;
    }

  return NULL;
}
/* Dump whole-function statistics to FILE: static counts of basic blocks,
   loops, conditions and statements, followed by the same counts weighted
   by profile (bb->count).  */
static void
print_global_statistics (FILE* file)
{
  long n_bbs = 0, n_loops = 0, n_stmts = 0, n_conditions = 0;
  long n_p_bbs = 0, n_p_loops = 0, n_p_stmts = 0, n_p_conditions = 0;
  basic_block bb;

  FOR_ALL_BB (bb)
    {
      gimple_stmt_iterator psi;

      n_bbs++;
      n_p_bbs += bb->count;

      /* Ignore artificial surrounding loop.  */
      if (bb == bb->loop_father->header && bb->index != 0)
	{
	  n_loops++;
	  n_p_loops += bb->count;
	}

      /* More than one successor means the block ends in a condition.  */
      if (EDGE_COUNT (bb->succs) > 1)
	{
	  n_conditions++;
	  n_p_conditions += bb->count;
	}

      for (psi = gsi_start_bb (bb); !gsi_end_p (psi); gsi_next (&psi))
	{
	  n_stmts++;
	  n_p_stmts += bb->count;
	}
    }

  fprintf (file, "\nGlobal statistics (");
  fprintf (file, "BBS:%ld, ", n_bbs);
  fprintf (file, "LOOPS:%ld, ", n_loops);
  fprintf (file, "CONDITIONS:%ld, ", n_conditions);
  fprintf (file, "STMTS:%ld)\n", n_stmts);
  fprintf (file, "\nGlobal profiling statistics (");
  fprintf (file, "BBS:%ld, ", n_p_bbs);
  fprintf (file, "LOOPS:%ld, ", n_p_loops);
  fprintf (file, "CONDITIONS:%ld, ", n_p_conditions);
  fprintf (file, "STMTS:%ld)\n", n_p_stmts);
}
static void init_copy_prop (void) { basic_block bb; n_copy_of = num_ssa_names; copy_of = XCNEWVEC (prop_value_t, n_copy_of); FOR_EACH_BB_FN (bb, cfun) { for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *stmt = gsi_stmt (si); ssa_op_iter iter; tree def; /* The only statements that we care about are those that may generate useful copies. We also need to mark conditional jumps so that their outgoing edges are added to the work lists of the propagator. */ if (stmt_ends_bb_p (stmt)) prop_set_simulate_again (stmt, true); else if (stmt_may_generate_copy (stmt)) prop_set_simulate_again (stmt, true); else prop_set_simulate_again (stmt, false); /* Mark all the outputs of this statement as not being the copy of anything. */ FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS) if (!prop_simulate_again_p (stmt)) set_copy_of_val (def, def); } for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { gphi *phi = si.phi (); tree def; def = gimple_phi_result (phi); if (virtual_operand_p (def)) prop_set_simulate_again (phi, false); else prop_set_simulate_again (phi, true); if (!prop_simulate_again_p (phi)) set_copy_of_val (def, def); } } }
/* Return the number of non-debug statements in a block. */
static unsigned int
count_stmts_in_block (basic_block bb)
{
  unsigned int count = 0;

  for (gimple_stmt_iterator it = gsi_start_bb (bb); !gsi_end_p (it);
       gsi_next (&it))
    if (!is_gimple_debug (gsi_stmt (it)))
      count++;

  return count;
}
/* Return the sum of estimate_num_insns over every statement in LOOP,
   weighting each statement with WEIGHTS.  */
unsigned
tree_num_loop_insns (struct loop *loop, eni_weights *weights)
{
  basic_block *body = get_loop_body (loop);
  unsigned total = 0;
  unsigned i;

  for (i = 0; i < loop->num_nodes; i++)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
	total += estimate_num_insns (gsi_stmt (gsi), weights);
    }

  free (body);
  return total;
}
/* Return an iterator positioned exactly at STMT within its basic block.
   PHIs live on a separate sequence from ordinary statements, so the
   iterator is seeded from the matching list before being pointed at
   STMT.  */
gimple_stmt_iterator
gsi_for_stmt (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  gimple_stmt_iterator it;

  if (gimple_code (stmt) == GIMPLE_PHI)
    it = gsi_start_phis (bb);
  else
    it = gsi_start_bb (bb);

  /* Aim the iterator directly at STMT regardless of list position.  */
  it.ptr = stmt;
  return it;
}
/* Return the estimated size (in eni_size_weights units) of all
   statements in BB.  */
static int
count_insns (basic_block bb)
{
  gimple_stmt_iterator gsi;
  int total = 0;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    total += estimate_num_insns (gsi_stmt (gsi), &eni_size_weights);

  return total;
}