static unsigned int
execute_return_slot_opt (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      block_stmt_iterator i;
      for (i = bsi_start (bb); !bsi_end_p (i); bsi_next (&i))
        {
          tree stmt = bsi_stmt (i);
          tree call;

          if (TREE_CODE (stmt) == MODIFY_EXPR
              && (call = TREE_OPERAND (stmt, 1),
                  TREE_CODE (call) == CALL_EXPR)
              && !CALL_EXPR_RETURN_SLOT_OPT (call)
              && aggregate_value_p (call, call))
            /* Check if the location being assigned to is call-clobbered.  */
            CALL_EXPR_RETURN_SLOT_OPT (call) =
              dest_safe_for_nrv_p (TREE_OPERAND (stmt, 0)) ? 1 : 0;
        }
    }
  return 0;
}
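/* A hypothetical source-level sketch (not from the GCC sources; the names
   below are invented for illustration) of the situation the return-slot
   pass above looks for: an aggregate-returning call whose destination is a
   local whose address cannot escape to the callee, so the callee may build
   its return value directly in that destination instead of in a temporary
   that is then copied.  */
struct big_ret { int v[16]; };
extern struct big_ret make_big_ret (void);

static int
return_slot_example (void)
{
  struct big_ret dst;     /* not addressable, not call clobbered */
  dst = make_big_ret ();  /* candidate for CALL_EXPR_RETURN_SLOT_OPT */
  return dst.v[0];
}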
/* Return TRUE if block BB has no executable statements, otherwise
   return FALSE.  */

bool
empty_block_p (basic_block bb)
{
  block_stmt_iterator bsi;

  /* BB must have no executable statements.  */
  bsi = bsi_start (bb);
  while (!bsi_end_p (bsi)
         && (TREE_CODE (bsi_stmt (bsi)) == LABEL_EXPR
             || IS_EMPTY_STMT (bsi_stmt (bsi))))
    bsi_next (&bsi);

  if (!bsi_end_p (bsi))
    return false;

  return true;
}
bool
potentially_threadable_block (basic_block bb)
{
  block_stmt_iterator bsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  bsi = bsi_last (bb);
  if (bsi_end_p (bsi)
      || ! bsi_stmt (bsi)
      || (TREE_CODE (bsi_stmt (bsi)) != COND_EXPR
          && TREE_CODE (bsi_stmt (bsi)) != GOTO_EXPR
          && TREE_CODE (bsi_stmt (bsi)) != SWITCH_EXPR))
    return false;

  return true;
}
unsigned
tree_num_loop_insns (struct loop *loop)
{
  basic_block *body = get_loop_body (loop);
  block_stmt_iterator bsi;
  unsigned size = 1, i;

  for (i = 0; i < loop->num_nodes; i++)
    for (bsi = bsi_start (body[i]); !bsi_end_p (bsi); bsi_next (&bsi))
      size += estimate_num_insns (bsi_stmt (bsi));
  free (body);

  return size;
}
static struct mem_ref_group *
gather_memory_references (struct loop *loop, bool *no_other_refs)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  basic_block bb;
  unsigned i;
  block_stmt_iterator bsi;
  tree stmt, lhs, rhs, call;
  struct mem_ref_group *refs = NULL;

  *no_other_refs = true;

  /* Scan the loop body in order, so that the former references precede
     the later ones.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (bb->loop_father != loop)
        continue;

      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
        {
          stmt = bsi_stmt (bsi);
          call = get_call_expr_in (stmt);
          if (call && !(call_expr_flags (call) & ECF_CONST))
            *no_other_refs = false;

          if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
            {
              if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
                *no_other_refs = false;
              continue;
            }

          lhs = GIMPLE_STMT_OPERAND (stmt, 0);
          rhs = GIMPLE_STMT_OPERAND (stmt, 1);

          if (REFERENCE_CLASS_P (rhs))
            *no_other_refs &= gather_memory_references_ref (loop, &refs,
                                                            rhs, false, stmt);
          if (REFERENCE_CLASS_P (lhs))
            *no_other_refs &= gather_memory_references_ref (loop, &refs,
                                                            lhs, true, stmt);
        }
    }
  free (body);

  return refs;
}
static tree
rewrite_reciprocal (block_stmt_iterator *bsi)
{
  tree stmt, lhs, rhs, stmt1, stmt2, var, name, tmp;
  tree real_one;

  stmt = bsi_stmt (*bsi);
  lhs = GENERIC_TREE_OPERAND (stmt, 0);
  rhs = GENERIC_TREE_OPERAND (stmt, 1);

  /* stmt must be GIMPLE_MODIFY_STMT.  */
  var = create_tmp_var (TREE_TYPE (rhs), "reciptmp");
  add_referenced_var (var);
  DECL_GIMPLE_REG_P (var) = 1;

  if (TREE_CODE (TREE_TYPE (rhs)) == VECTOR_TYPE)
    {
      int i, len;
      tree list = NULL_TREE;
      real_one = build_real (TREE_TYPE (TREE_TYPE (rhs)), dconst1);
      len = TYPE_VECTOR_SUBPARTS (TREE_TYPE (rhs));
      for (i = 0; i < len; i++)
        list = tree_cons (NULL, real_one, list);
      real_one = build_vector (TREE_TYPE (rhs), list);
    }
  else
    real_one = build_real (TREE_TYPE (rhs), dconst1);

  tmp = build2 (RDIV_EXPR, TREE_TYPE (rhs),
                real_one, TREE_OPERAND (rhs, 1));
  stmt1 = build_gimple_modify_stmt (var, tmp);
  name = make_ssa_name (var, stmt1);
  GIMPLE_STMT_OPERAND (stmt1, 0) = name;

  tmp = build2 (MULT_EXPR, TREE_TYPE (rhs),
                name, TREE_OPERAND (rhs, 0));
  stmt2 = build_gimple_modify_stmt (lhs, tmp);

  /* Replace division stmt with reciprocal and multiply stmts.
     The multiply stmt is not invariant, so update iterator
     and avoid rescanning.  */
  bsi_replace (bsi, stmt1, true);
  bsi_insert_after (bsi, stmt2, BSI_NEW_STMT);
  SSA_NAME_DEF_STMT (lhs) = stmt2;

  /* Continue processing with invariant reciprocal statement.  */
  return stmt1;
}
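/* A hypothetical C-level sketch (not from the GCC sources; the names below
   are invented) of what rewrite_reciprocal above does at the GIMPLE level:
   a division by a loop-invariant denominator is split into an invariant
   reciprocal plus a multiply, so the expensive part can be hoisted out of
   the loop.  The rewrite is only a valid substitute under unsafe/fast math
   assumptions, since a * (1/b) need not round identically to a / b.  */
static void
reciprocal_example (float *out, const float *a, float b, int n)
{
  /* Before: out[i] = a[i] / b;  -- one division per iteration.  */
  float reciptmp = 1.0f / b;    /* invariant, computed once */
  int i;
  for (i = 0; i < n; i++)
    out[i] = a[i] * reciptmp;
}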
static bool
should_duplicate_loop_header_p (basic_block header, struct loop *loop,
                                int *limit)
{
  block_stmt_iterator bsi;
  tree last;

  /* Do not copy one block more than once (we do not really want to do
     loop peeling here).  */
  if (header->aux)
    return false;

  gcc_assert (EDGE_COUNT (header->succs) > 0);
  if (EDGE_COUNT (header->succs) == 1)
    return false;
  if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest)
      && flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 1)->dest))
    return false;

  /* If this is not the original loop header, we want it to have just
     one predecessor in order to match the && pattern.  */
  if (header != loop->header && EDGE_COUNT (header->preds) >= 2)
    return false;

  last = last_stmt (header);
  if (TREE_CODE (last) != COND_EXPR)
    return false;

  /* Approximately copy the conditions that used to be used in jump.c --
     at most 20 insns and no calls.  */
  for (bsi = bsi_start (header); !bsi_end_p (bsi); bsi_next (&bsi))
    {
      last = bsi_stmt (bsi);

      if (TREE_CODE (last) == LABEL_EXPR)
        continue;

      if (get_call_expr_in (last))
        return false;

      *limit -= estimate_num_insns (last);
      if (*limit < 0)
        return false;
    }

  return true;
}
static void
mf_xform_derefs (void)
{
  basic_block bb, next;
  block_stmt_iterator i;
  int saved_last_basic_block = last_basic_block;

  bb = ENTRY_BLOCK_PTR->next_bb;
  do
    {
      next = bb->next_bb;
      for (i = bsi_start (bb); !bsi_end_p (i); bsi_next (&i))
        {
          tree s = bsi_stmt (i);

          /* Only a few GIMPLE statements can reference memory.  */
          switch (TREE_CODE (s))
            {
            case MODIFY_EXPR:
              mf_xform_derefs_1 (&i, &TREE_OPERAND (s, 0), EXPR_LOCUS (s),
                                 integer_one_node);
              mf_xform_derefs_1 (&i, &TREE_OPERAND (s, 1), EXPR_LOCUS (s),
                                 integer_zero_node);
              break;

            case RETURN_EXPR:
              if (TREE_OPERAND (s, 0) != NULL_TREE)
                {
                  if (TREE_CODE (TREE_OPERAND (s, 0)) == MODIFY_EXPR)
                    mf_xform_derefs_1 (&i,
                                       &TREE_OPERAND (TREE_OPERAND (s, 0), 1),
                                       EXPR_LOCUS (s), integer_zero_node);
                  else
                    mf_xform_derefs_1 (&i, &TREE_OPERAND (s, 0),
                                       EXPR_LOCUS (s), integer_zero_node);
                }
              break;

            default:
              ;
            }
        }
      bb = next;
    }
  while (bb && bb->index <= saved_last_basic_block);
}
static void
execute_return_slot_opt (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      block_stmt_iterator i;
      for (i = bsi_start (bb); !bsi_end_p (i); bsi_next (&i))
        {
          tree stmt = bsi_stmt (i);
          tree call;

          if (TREE_CODE (stmt) == MODIFY_EXPR
              && (call = TREE_OPERAND (stmt, 1),
                  TREE_CODE (call) == CALL_EXPR)
              && !CALL_EXPR_RETURN_SLOT_OPT (call)
              && aggregate_value_p (call, call))
            {
              def_operand_p def_p;
              ssa_op_iter op_iter;

              /* We determine whether or not the LHS address escapes by
                 asking whether it is call clobbered.  When the LHS isn't
                 a simple decl, we need to check the VDEFs, so it's
                 simplest to just loop through all the DEFs.  */
              FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_ALL_DEFS)
                {
                  tree def = DEF_FROM_PTR (def_p);
                  if (TREE_CODE (def) == SSA_NAME)
                    def = SSA_NAME_VAR (def);
                  if (is_call_clobbered (def))
                    goto unsafe;
                }

              /* No defs are call clobbered, so the optimization is safe.  */
              CALL_EXPR_RETURN_SLOT_OPT (call) = 1;

              /* This is too late to mark the target addressable like we do
                 in gimplify_modify_expr_rhs, but that's OK; anything that
                 wasn't already addressable was handled there.  */
            unsafe:;
            }
        }
    }
}
static void
tree_ssa_forward_propagate_single_use_vars (void)
{
  basic_block bb;

  cfg_changed = false;

  FOR_EACH_BB (bb)
    {
      block_stmt_iterator bsi;

      /* Note we update BSI within the loop as necessary.  */
      for (bsi = bsi_start (bb); !bsi_end_p (bsi); )
        {
          tree stmt = bsi_stmt (bsi);

          /* If this statement sets an SSA_NAME to an address,
             try to propagate the address into the uses of the SSA_NAME.  */
          if (TREE_CODE (stmt) == MODIFY_EXPR
              && TREE_CODE (TREE_OPERAND (stmt, 1)) == ADDR_EXPR
              && TREE_CODE (TREE_OPERAND (stmt, 0)) == SSA_NAME)
            {
              if (forward_propagate_addr_expr (stmt))
                bsi_remove (&bsi);
              else
                bsi_next (&bsi);
            }
          else if (TREE_CODE (stmt) == COND_EXPR)
            {
              forward_propagate_into_cond (stmt);
              bsi_next (&bsi);
            }
          else
            bsi_next (&bsi);
        }
    }

  if (cfg_changed)
    cleanup_tree_cfg ();
}
static struct mem_ref_group *
gather_memory_references (struct loop *loop)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  basic_block bb;
  unsigned i;
  block_stmt_iterator bsi;
  tree stmt, lhs, rhs;
  struct mem_ref_group *refs = NULL;

  /* Scan the loop body in order, so that the former references precede
     the later ones.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];
      if (bb->loop_father != loop)
        continue;

      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
        {
          stmt = bsi_stmt (bsi);
          if (TREE_CODE (stmt) != MODIFY_EXPR)
            continue;

          lhs = TREE_OPERAND (stmt, 0);
          rhs = TREE_OPERAND (stmt, 1);

          if (REFERENCE_CLASS_P (rhs))
            gather_memory_references_ref (loop, &refs, rhs, false, stmt);
          if (REFERENCE_CLASS_P (lhs))
            gather_memory_references_ref (loop, &refs, lhs, true, stmt);
        }
    }
  free (body);

  return refs;
}
static void
tree_nrv (void)
{
  tree result = DECL_RESULT (current_function_decl);
  tree result_type = TREE_TYPE (result);
  tree found = NULL;
  basic_block bb;
  block_stmt_iterator bsi;
  struct nrv_data data;

  /* If this function does not return an aggregate type in memory, then
     there is nothing to do.  */
  if (!aggregate_value_p (result, current_function_decl))
    return;

  /* Look through each block for assignments to the RESULT_DECL.  */
  FOR_EACH_BB (bb)
    {
      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
        {
          tree stmt = bsi_stmt (bsi);
          tree ret_expr;

          if (TREE_CODE (stmt) == RETURN_EXPR)
            {
              /* In a function with an aggregate return value, the
                 gimplifier has changed all non-empty RETURN_EXPRs to
                 return the RESULT_DECL.  */
              ret_expr = TREE_OPERAND (stmt, 0);
              if (ret_expr)
                gcc_assert (ret_expr == result);
            }
          else if (TREE_CODE (stmt) == MODIFY_EXPR
                   && TREE_OPERAND (stmt, 0) == result)
            {
              ret_expr = TREE_OPERAND (stmt, 1);

              /* Now verify that this return statement uses the same value
                 as any previously encountered return statement.  */
              if (found != NULL)
                {
                  /* If we found a return statement using a different variable
                     than previous return statements, then we can not perform
                     NRV optimizations.  */
                  if (found != ret_expr)
                    return;
                }
              else
                found = ret_expr;

              /* The returned value must be a local automatic variable of the
                 same type and alignment as the function's result.  */
              if (TREE_CODE (found) != VAR_DECL
                  || TREE_THIS_VOLATILE (found)
                  || DECL_CONTEXT (found) != current_function_decl
                  || TREE_STATIC (found)
                  || TREE_ADDRESSABLE (found)
                  || DECL_ALIGN (found) > DECL_ALIGN (result)
                  || !lang_hooks.types_compatible_p (TREE_TYPE (found),
                                                     result_type))
                return;
            }
        }
    }

  if (!found)
    return;

  /* If dumping details, then note once and only the NRV replacement.  */
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "NRV Replaced: ");
      print_generic_expr (dump_file, found, dump_flags);
      fprintf (dump_file, " with: ");
      print_generic_expr (dump_file, result, dump_flags);
      fprintf (dump_file, "\n");
    }

  /* At this point we know that all the return statements return the
     same local which has suitable attributes for NRV.  Copy debugging
     information from FOUND to RESULT.  */
  DECL_NAME (result) = DECL_NAME (found);
  DECL_SOURCE_LOCATION (result) = DECL_SOURCE_LOCATION (found);
  DECL_ABSTRACT_ORIGIN (result) = DECL_ABSTRACT_ORIGIN (found);
  TREE_ADDRESSABLE (result) = TREE_ADDRESSABLE (found);

  /* Now walk through the function changing all references to VAR to be
     RESULT.  */
  data.var = found;
  data.result = result;
  FOR_EACH_BB (bb)
    {
      for (bsi = bsi_start (bb); !bsi_end_p (bsi); )
        {
          tree *tp = bsi_stmt_ptr (bsi);
          /* If this is a copy from VAR to RESULT, remove it.  */
          if (TREE_CODE (*tp) == MODIFY_EXPR
              && TREE_OPERAND (*tp, 0) == result
              && TREE_OPERAND (*tp, 1) == found)
            bsi_remove (&bsi);
          else
            {
              walk_tree (tp, finalize_nrv_r, &data, 0);
              bsi_next (&bsi);
            }
        }
    }

  /* FOUND is no longer used.  Ensure it gets removed.  */
  var_ann (found)->used = 0;
}
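/* A hypothetical example (not from the GCC sources; the names are invented)
   of a function tree_nrv above can transform: every return statement
   returns the same suitable local, so that local can be replaced by the
   RESULT_DECL and the final copy into the return slot removed.  */
struct nrv_big { int v[16]; };

static struct nrv_big
nrv_example (int n)
{
  struct nrv_big tmp;   /* local, non-volatile, non-addressable */
  int i;
  for (i = 0; i < 16; i++)
    tmp.v[i] = n + i;
  return tmp;           /* all returns use TMP, so NRV applies */
}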
static tree
rewrite_bittest (block_stmt_iterator *bsi)
{
  tree stmt, lhs, rhs, var, name, use_stmt, stmt1, stmt2, t;
  use_operand_p use;

  stmt = bsi_stmt (*bsi);
  lhs = GENERIC_TREE_OPERAND (stmt, 0);
  rhs = GENERIC_TREE_OPERAND (stmt, 1);

  /* Verify that the single use of lhs is a comparison against zero.  */
  if (TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use, &use_stmt)
      || TREE_CODE (use_stmt) != COND_EXPR)
    return stmt;
  t = COND_EXPR_COND (use_stmt);
  if (TREE_OPERAND (t, 0) != lhs
      || (TREE_CODE (t) != NE_EXPR
          && TREE_CODE (t) != EQ_EXPR)
      || !integer_zerop (TREE_OPERAND (t, 1)))
    return stmt;

  /* Get at the operands of the shift.  The rhs is TMP1 & 1.  */
  stmt1 = SSA_NAME_DEF_STMT (TREE_OPERAND (rhs, 0));
  if (TREE_CODE (stmt1) != GIMPLE_MODIFY_STMT)
    return stmt;

  /* There is a conversion in between possibly inserted by fold.  */
  t = GIMPLE_STMT_OPERAND (stmt1, 1);
  if (TREE_CODE (t) == NOP_EXPR
      || TREE_CODE (t) == CONVERT_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      if (TREE_CODE (t) != SSA_NAME
          || !has_single_use (t))
        return stmt;
      stmt1 = SSA_NAME_DEF_STMT (t);
      if (TREE_CODE (stmt1) != GIMPLE_MODIFY_STMT)
        return stmt;
      t = GIMPLE_STMT_OPERAND (stmt1, 1);
    }

  /* Verify that B is loop invariant but A is not.  Verify that with
     all the stmt walking we are still in the same loop.  */
  if (TREE_CODE (t) == RSHIFT_EXPR
      && loop_containing_stmt (stmt1) == loop_containing_stmt (stmt)
      && outermost_invariant_loop_expr (TREE_OPERAND (t, 1),
                                        loop_containing_stmt (stmt1)) != NULL
      && outermost_invariant_loop_expr (TREE_OPERAND (t, 0),
                                        loop_containing_stmt (stmt1)) == NULL)
    {
      tree a = TREE_OPERAND (t, 0);
      tree b = TREE_OPERAND (t, 1);

      /* 1 << B */
      var = create_tmp_var (TREE_TYPE (a), "shifttmp");
      add_referenced_var (var);
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
                       build_int_cst (TREE_TYPE (a), 1), b);
      stmt1 = build_gimple_modify_stmt (var, t);
      name = make_ssa_name (var, stmt1);
      GIMPLE_STMT_OPERAND (stmt1, 0) = name;

      /* A & (1 << B) */
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
      stmt2 = build_gimple_modify_stmt (var, t);
      name = make_ssa_name (var, stmt2);
      GIMPLE_STMT_OPERAND (stmt2, 0) = name;

      /* Replace the SSA_NAME we compare against zero.  Adjust
         the type of zero accordingly.  */
      SET_USE (use, name);
      TREE_OPERAND (COND_EXPR_COND (use_stmt), 1)
        = build_int_cst_type (TREE_TYPE (name), 0);

      bsi_insert_before (bsi, stmt1, BSI_SAME_STMT);
      bsi_replace (bsi, stmt2, true);

      return stmt1;
    }
  return stmt;
}
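/* A hypothetical source-level sketch (not from the GCC sources; the names
   are invented) of the identity rewrite_bittest above exploits: when B is
   loop invariant but A is not, testing bit B of A as A & (1 << B) exposes
   the invariant 1 << B for hoisting, instead of recomputing (A >> B) & 1
   on every iteration.  */
static int
bittest_example (unsigned int a, unsigned int b)
{
  /* Before: return ((a >> b) & 1) != 0;  */
  return (a & (1u << b)) != 0;
}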
static basic_block
expand_gimple_basic_block (basic_block bb, FILE *dump_file)
{
  block_stmt_iterator bsi = bsi_start (bb);
  tree stmt = NULL;
  rtx note, last;
  edge e;
  edge_iterator ei;

  if (dump_file)
    {
      fprintf (dump_file,
               "\n;; Generating RTL for tree basic block %d\n",
               bb->index);
    }

  if (!bsi_end_p (bsi))
    stmt = bsi_stmt (bsi);

  if (stmt && TREE_CODE (stmt) == LABEL_EXPR)
    {
      last = get_last_insn ();

      expand_expr_stmt (stmt);

      /* Java emits line number notes in the top of labels.
         ??? Make this go away once line number notes are obsoleted.  */
      BB_HEAD (bb) = NEXT_INSN (last);
      if (NOTE_P (BB_HEAD (bb)))
        BB_HEAD (bb) = NEXT_INSN (BB_HEAD (bb));
      bsi_next (&bsi);
      note = emit_note_after (NOTE_INSN_BASIC_BLOCK, BB_HEAD (bb));

      maybe_dump_rtl_for_tree_stmt (stmt, last);
    }
  else
    note = BB_HEAD (bb) = emit_note (NOTE_INSN_BASIC_BLOCK);

  NOTE_BASIC_BLOCK (note) = bb;

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      /* Clear EDGE_EXECUTABLE.  This flag is never used in the backend.  */
      e->flags &= ~EDGE_EXECUTABLE;

      /* At the moment not all abnormal edges match the RTL representation.
         It is safe to remove them here as find_sub_basic_blocks will
         rediscover them.  In the future we should get this fixed properly.  */
      if (e->flags & EDGE_ABNORMAL)
        remove_edge (e);
      else
        ei_next (&ei);
    }

  for (; !bsi_end_p (bsi); bsi_next (&bsi))
    {
      tree stmt = bsi_stmt (bsi);
      basic_block new_bb;

      if (!stmt)
        continue;

      /* Expand this statement, then evaluate the resulting RTL and
         fixup the CFG accordingly.  */
      if (TREE_CODE (stmt) == COND_EXPR)
        {
          new_bb = expand_gimple_cond_expr (bb, stmt);
          if (new_bb)
            return new_bb;
        }
      else
        {
          tree call = get_call_expr_in (stmt);
          if (call && CALL_EXPR_TAILCALL (call))
            {
              bool can_fallthru;
              new_bb = expand_gimple_tailcall (bb, stmt, &can_fallthru);
              if (new_bb)
                {
                  if (can_fallthru)
                    bb = new_bb;
                  else
                    return new_bb;
                }
            }
          else
            {
              last = get_last_insn ();
              expand_expr_stmt (stmt);
              maybe_dump_rtl_for_tree_stmt (stmt, last);
            }
        }
    }

  do_pending_stack_adjust ();

  /* Find the block tail.  The last insn in the block is the insn
     before a barrier and/or table jump insn.  */
  last = get_last_insn ();
  if (BARRIER_P (last))
    last = PREV_INSN (last);
  if (JUMP_TABLE_DATA_P (last))
    last = PREV_INSN (PREV_INSN (last));
  BB_END (bb) = last;

  update_bb_for_insn (bb);

  return bb;
}
static void
mf_build_check_statement_for (tree base, tree limit,
                              block_stmt_iterator *instr_bsi,
                              location_t *locus, tree dirflag)
{
  tree_stmt_iterator head, tsi;
  block_stmt_iterator bsi;
  basic_block cond_bb, then_bb, join_bb;
  edge e;
  tree cond, t, u, v;
  tree mf_base;
  tree mf_elem;
  tree mf_limit;

  /* We first need to split the current basic block, and start altering
     the CFG.  This allows us to insert the statements we're about to
     construct into the right basic blocks.  */

  cond_bb = bb_for_stmt (bsi_stmt (*instr_bsi));
  bsi = *instr_bsi;
  bsi_prev (&bsi);
  if (! bsi_end_p (bsi))
    e = split_block (cond_bb, bsi_stmt (bsi));
  else
    e = split_block_after_labels (cond_bb);
  cond_bb = e->src;
  join_bb = e->dest;

  /* A recap at this point: join_bb is the basic block at whose head
     is the gimple statement for which this check expression is being
     built.  cond_bb is the (possibly new, synthetic) basic block the
     end of which will contain the cache-lookup code, and a
     conditional that jumps to the cache-miss code or, much more
     likely, over to join_bb.  */

  /* Create the bb that contains the cache-miss fallback block (mf_check).  */
  then_bb = create_empty_bb (cond_bb);
  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
  make_single_succ_edge (then_bb, join_bb, EDGE_FALLTHRU);

  /* Mark the pseudo-fallthrough edge from cond_bb to join_bb.  */
  e = find_edge (cond_bb, join_bb);
  e->flags = EDGE_FALSE_VALUE;
  e->count = cond_bb->count;
  e->probability = REG_BR_PROB_BASE;

  /* Update dominance info.  Note that bb_join's data was
     updated by split_block.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
      set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
    }

  /* Build our local variables.  */
  mf_elem = create_tmp_var (mf_cache_structptr_type, "__mf_elem");
  mf_base = create_tmp_var (mf_uintptr_type, "__mf_base");
  mf_limit = create_tmp_var (mf_uintptr_type, "__mf_limit");

  /* Build: __mf_base = (uintptr_t) <base address expression>.  */
  t = build2 (MODIFY_EXPR, void_type_node, mf_base,
              convert (mf_uintptr_type, unshare_expr (base)));
  SET_EXPR_LOCUS (t, locus);
  gimplify_to_stmt_list (&t);
  head = tsi_start (t);
  tsi = tsi_last (t);

  /* Build: __mf_limit = (uintptr_t) <limit address expression>.  */
  t = build2 (MODIFY_EXPR, void_type_node, mf_limit,
              convert (mf_uintptr_type, unshare_expr (limit)));
  SET_EXPR_LOCUS (t, locus);
  gimplify_to_stmt_list (&t);
  tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING);

  /* Build: __mf_elem = &__mf_lookup_cache [(__mf_base >> __mf_shift)
                                            & __mf_mask].  */
  t = build2 (RSHIFT_EXPR, mf_uintptr_type, mf_base,
              (flag_mudflap_threads ? mf_cache_shift_decl
               : mf_cache_shift_decl_l));
  t = build2 (BIT_AND_EXPR, mf_uintptr_type, t,
              (flag_mudflap_threads ? mf_cache_mask_decl
               : mf_cache_mask_decl_l));
  t = build4 (ARRAY_REF,
              TREE_TYPE (TREE_TYPE (mf_cache_array_decl)),
              mf_cache_array_decl, t, NULL_TREE, NULL_TREE);
  t = build1 (ADDR_EXPR, mf_cache_structptr_type, t);
  t = build2 (MODIFY_EXPR, void_type_node, mf_elem, t);
  SET_EXPR_LOCUS (t, locus);
  gimplify_to_stmt_list (&t);
  tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING);

  /* Quick validity check.

     if (__mf_elem->low > __mf_base
         || (__mf_elem_high < __mf_limit))
        {
          __mf_check ();
          ... and only if single-threaded:
          __mf_lookup_shift_1 = f...;
          __mf_lookup_mask_l = ...;
        }

     It is expected that this body of code is rarely executed so we mark
     the edge to the THEN clause of the conditional jump as unlikely.  */

  /* Construct t <-- '__mf_elem->low > __mf_base'.  */
  t = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              TYPE_FIELDS (mf_cache_struct_type), NULL_TREE);
  t = build2 (GT_EXPR, boolean_type_node, t, mf_base);

  /* Construct '__mf_elem->high < __mf_limit'.

     First build:
        1) u <-- '__mf_elem->high'
        2) v <-- '__mf_limit'.

     Then build 'u <-- (u < v).  */

  u = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              TREE_CHAIN (TYPE_FIELDS (mf_cache_struct_type)), NULL_TREE);

  v = mf_limit;

  u = build2 (LT_EXPR, boolean_type_node, u, v);

  /* Build the composed conditional: t <-- 't || u'.  Then store the
     result of the evaluation of 't' in a temporary variable which we
     can use as the condition for the conditional jump.  */
  t = build2 (TRUTH_OR_EXPR, boolean_type_node, t, u);
  cond = create_tmp_var (boolean_type_node, "__mf_unlikely_cond");
  t = build2 (MODIFY_EXPR, boolean_type_node, cond, t);
  gimplify_to_stmt_list (&t);
  tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING);

  /* Build the conditional jump.  'cond' is just a temporary so we can
     simply build a void COND_EXPR.  We do need labels in both arms
     though.  */
  t = build3 (COND_EXPR, void_type_node, cond,
              build1 (GOTO_EXPR, void_type_node, tree_block_label (then_bb)),
              build1 (GOTO_EXPR, void_type_node, tree_block_label (join_bb)));
  SET_EXPR_LOCUS (t, locus);
  tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING);

  /* At this point, after so much hard work, we have only constructed
     the conditional jump,

     if (__mf_elem->low > __mf_base
         || (__mf_elem_high < __mf_limit))

     The lowered GIMPLE tree representing this code is in the statement
     list starting at 'head'.

     We can insert this now in the current basic block, i.e. the one that
     the statement we're instrumenting was originally in.  */
  bsi = bsi_last (cond_bb);
  for (tsi = head; ! tsi_end_p (tsi); tsi_next (&tsi))
    bsi_insert_after (&bsi, tsi_stmt (tsi), BSI_CONTINUE_LINKING);

  /* Now build up the body of the cache-miss handling:

     __mf_check();
     refresh *_l vars.

     This is the body of the conditional.  */

  u = tree_cons (NULL_TREE,
                 mf_file_function_line_tree (locus == NULL ? UNKNOWN_LOCATION
                                             : *locus),
                 NULL_TREE);
  u = tree_cons (NULL_TREE, dirflag, u);
  /* NB: we pass the overall [base..limit] range to mf_check.  */
  u = tree_cons (NULL_TREE,
                 fold_build2 (PLUS_EXPR, integer_type_node,
                              fold_build2 (MINUS_EXPR, mf_uintptr_type,
                                           mf_limit, mf_base),
                              integer_one_node),
                 u);
  u = tree_cons (NULL_TREE, mf_base, u);
  t = build_function_call_expr (mf_check_fndecl, u);
  gimplify_to_stmt_list (&t);
  head = tsi_start (t);
  tsi = tsi_last (t);

  if (! flag_mudflap_threads)
    {
      t = build2 (MODIFY_EXPR, void_type_node,
                  mf_cache_shift_decl_l, mf_cache_shift_decl);
      tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING);

      t = build2 (MODIFY_EXPR, void_type_node,
                  mf_cache_mask_decl_l, mf_cache_mask_decl);
      tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING);
    }

  /* Insert the check code in the THEN block.  */
  bsi = bsi_start (then_bb);
  for (tsi = head; ! tsi_end_p (tsi); tsi_next (&tsi))
    bsi_insert_after (&bsi, tsi_stmt (tsi), BSI_CONTINUE_LINKING);

  *instr_bsi = bsi_start (join_bb);
  bsi_next (instr_bsi);
}
static bool
init_dont_simulate_again (void)
{
  basic_block bb;
  block_stmt_iterator bsi;
  tree phi;
  bool saw_a_complex_op = false;

  FOR_EACH_BB (bb)
    {
      for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
        DONT_SIMULATE_AGAIN (phi) = !is_complex_reg (PHI_RESULT (phi));

      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
        {
          tree orig_stmt, stmt, rhs = NULL;
          bool dsa;

          orig_stmt = stmt = bsi_stmt (bsi);

          /* Most control-altering statements must be initially
             simulated, else we won't cover the entire cfg.  */
          dsa = !stmt_ends_bb_p (stmt);

          switch (TREE_CODE (stmt))
            {
            case RETURN_EXPR:
              /* We don't care what the lattice value of <retval> is,
                 since it's never used as an input to another computation.  */
              dsa = true;
              stmt = TREE_OPERAND (stmt, 0);
              if (!stmt || TREE_CODE (stmt) != MODIFY_EXPR)
                break;
              /* FALLTHRU */

            case MODIFY_EXPR:
              dsa = !is_complex_reg (TREE_OPERAND (stmt, 0));
              rhs = TREE_OPERAND (stmt, 1);
              break;

            case COND_EXPR:
              rhs = TREE_OPERAND (stmt, 0);
              break;

            default:
              break;
            }

          if (rhs)
            switch (TREE_CODE (rhs))
              {
              case EQ_EXPR:
              case NE_EXPR:
                rhs = TREE_OPERAND (rhs, 0);
                /* FALLTHRU */

              case PLUS_EXPR:
              case MINUS_EXPR:
              case MULT_EXPR:
              case TRUNC_DIV_EXPR:
              case CEIL_DIV_EXPR:
              case FLOOR_DIV_EXPR:
              case ROUND_DIV_EXPR:
              case RDIV_EXPR:
              case NEGATE_EXPR:
              case CONJ_EXPR:
                if (TREE_CODE (TREE_TYPE (rhs)) == COMPLEX_TYPE)
                  saw_a_complex_op = true;
                break;

              default:
                break;
              }

          DONT_SIMULATE_AGAIN (orig_stmt) = dsa;
        }
    }

  return saw_a_complex_op;
}
static bool
init_dont_simulate_again (void)
{
  basic_block bb;
  block_stmt_iterator bsi;
  tree phi;
  bool saw_a_complex_op = false;

  FOR_EACH_BB (bb)
    {
      for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
        DONT_SIMULATE_AGAIN (phi) = !is_complex_reg (PHI_RESULT (phi));

      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
        {
          tree orig_stmt, stmt, rhs = NULL;
          bool dsa;

          orig_stmt = stmt = bsi_stmt (bsi);

          /* Most control-altering statements must be initially
             simulated, else we won't cover the entire cfg.  */
          dsa = !stmt_ends_bb_p (stmt);

          switch (TREE_CODE (stmt))
            {
            case RETURN_EXPR:
              /* We don't care what the lattice value of <retval> is,
                 since it's never used as an input to another computation.  */
              dsa = true;
              stmt = TREE_OPERAND (stmt, 0);
              if (!stmt || TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
                break;
              /* FALLTHRU */

            case GIMPLE_MODIFY_STMT:
              dsa = !is_complex_reg (GIMPLE_STMT_OPERAND (stmt, 0));
              rhs = GIMPLE_STMT_OPERAND (stmt, 1);
              break;

            case COND_EXPR:
              rhs = TREE_OPERAND (stmt, 0);
              break;

            default:
              break;
            }

          if (rhs)
            switch (TREE_CODE (rhs))
              {
              case EQ_EXPR:
              case NE_EXPR:
                rhs = TREE_OPERAND (rhs, 0);
                /* FALLTHRU */

              case PLUS_EXPR:
              case MINUS_EXPR:
              case MULT_EXPR:
              case TRUNC_DIV_EXPR:
              case CEIL_DIV_EXPR:
              case FLOOR_DIV_EXPR:
              case ROUND_DIV_EXPR:
              case RDIV_EXPR:
              case NEGATE_EXPR:
              case CONJ_EXPR:
                if (TREE_CODE (TREE_TYPE (rhs)) == COMPLEX_TYPE)
                  saw_a_complex_op = true;
                break;

              case REALPART_EXPR:
              case IMAGPART_EXPR:
                /* The total store transformation performed during
                   gimplification creates such uninitialized loads
                   and we need to lower the statement to be able
                   to fix things up.  */
                if (TREE_CODE (TREE_OPERAND (rhs, 0)) == SSA_NAME
                    && ssa_undefined_value_p (TREE_OPERAND (rhs, 0)))
                  saw_a_complex_op = true;
                break;

              default:
                break;
              }

          DONT_SIMULATE_AGAIN (orig_stmt) = dsa;
        }
    }

  return saw_a_complex_op;
}
static bool
tree_if_conversion (struct loop *loop, bool for_vectorizer)
{
  basic_block bb;
  block_stmt_iterator itr;
  unsigned int i;

  ifc_bbs = NULL;

  /* if-conversion is not appropriate for all loops.  First, check if
     loop is if-convertible or not.  */
  if (!if_convertible_loop_p (loop, for_vectorizer))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "-------------------------\n");
      if (ifc_bbs)
        {
          free (ifc_bbs);
          ifc_bbs = NULL;
        }
      free_dominance_info (CDI_POST_DOMINATORS);
      return false;
    }

  /* Do actual work now.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      tree cond;

      bb = ifc_bbs [i];

      /* Update condition using predicate list.  */
      cond = bb->aux;

      /* Process all statements in this basic block.
         Remove conditional expression, if any, and annotate
         destination basic block(s) appropriately.  */
      for (itr = bsi_start (bb); !bsi_end_p (itr); /* empty */)
        {
          tree t = bsi_stmt (itr);
          cond = tree_if_convert_stmt (loop, t, cond, &itr);
          if (!bsi_end_p (itr))
            bsi_next (&itr);
        }

      /* If current bb has only one successor, then consider it as an
         unconditional goto.  */
      if (single_succ_p (bb))
        {
          basic_block bb_n = single_succ (bb);
          if (cond != NULL_TREE)
            add_to_predicate_list (bb_n, cond);
        }
    }

  /* Now, all statements are if-converted and basic blocks are
     annotated appropriately.  Combine all basic blocks into one huge
     basic block.  */
  combine_blocks (loop);

  /* clean up */
  clean_predicate_lists (loop);
  free (ifc_bbs);
  ifc_bbs = NULL;

  return true;
}