static bool forward_propagate_addr_into_variable_array_index (tree offset, tree def_rhs, gimple_stmt_iterator *use_stmt_gsi) { tree index; gimple offset_def, use_stmt = gsi_stmt (*use_stmt_gsi); /* Get the offset's defining statement. */ offset_def = SSA_NAME_DEF_STMT (offset); /* Try to find an expression for a proper index. This is either a multiplication expression by the element size or just the ssa name we came along with, in case the element size is one. In that case, however, we do not allow multiplications because they can be computing an index into a higher-level dimension (PR 37861). */ if (integer_onep (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (def_rhs))))) { if (is_gimple_assign (offset_def) && gimple_assign_rhs_code (offset_def) == MULT_EXPR) return false; index = offset; } else { /* The statement which defines OFFSET before type conversion must be a simple GIMPLE_ASSIGN. */ if (!is_gimple_assign (offset_def)) return false; /* The RHS of the statement which defines OFFSET must be a multiplication of an object by the size of the array elements. This implicitly verifies that the size of the array elements is constant. */ offset = gimple_assign_rhs1 (offset_def); if (gimple_assign_rhs_code (offset_def) != MULT_EXPR || TREE_CODE (gimple_assign_rhs2 (offset_def)) != INTEGER_CST || !simple_cst_equal (gimple_assign_rhs2 (offset_def), TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (def_rhs))))) return false; /* The first operand to the MULT_EXPR is the desired index. */ index = offset; } /* Replace the pointer addition with array indexing. */ gimple_assign_set_rhs_from_tree (use_stmt_gsi, unshare_expr (def_rhs)); use_stmt = gsi_stmt (*use_stmt_gsi); TREE_OPERAND (TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0), 1) = index; /* That should have created gimple, so there is no need to record information to undo the propagation. */ fold_stmt_inplace (use_stmt); tidy_after_forward_propagate_addr (use_stmt); return true; }
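The function above rewrites a pointer addition whose offset is a multiple of the element size into direct array indexing. A minimal standalone sketch of the source-level equivalence it relies on (plain C with invented names, not GCC internals):

#include <assert.h>
#include <stdlib.h>

int
main (void)
{
  int a[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  int *p = &a[0];
  size_t i = 3;

  /* Before:  *(int *) ((char *) p + i * sizeof (int))
     After:   a[i] -- valid only because the offset is index times the
     element size, which is exactly what the pass verifies above.  */
  int before = *(int *) ((char *) p + i * sizeof (int));
  int after = a[i];

  assert (before == after);
  return 0;
}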
/* Check whether G is a potential conditional compare candidate. */ static bool ccmp_candidate_p (gimple *g) { tree rhs = gimple_assign_rhs_to_tree (g); tree lhs, op0, op1; gimple *gs0, *gs1; enum tree_code tcode, tcode0, tcode1; tcode = TREE_CODE (rhs); if (tcode != BIT_AND_EXPR && tcode != BIT_IOR_EXPR) return false; lhs = gimple_assign_lhs (g); op0 = TREE_OPERAND (rhs, 0); op1 = TREE_OPERAND (rhs, 1); if ((TREE_CODE (op0) != SSA_NAME) || (TREE_CODE (op1) != SSA_NAME) || !has_single_use (lhs)) return false; gs0 = get_gimple_for_ssa_name (op0); gs1 = get_gimple_for_ssa_name (op1); if (!gs0 || !gs1 || !is_gimple_assign (gs0) || !is_gimple_assign (gs1) /* g, gs0 and gs1 must be in the same basic block, since the current stage is out-of-ssa. We cannot guarantee correctness when forwarding gs0 and gs1 into g without DATAFLOW analysis. */ || gimple_bb (gs0) != gimple_bb (gs1) || gimple_bb (gs0) != gimple_bb (g)) return false; if (!(INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs0))) || POINTER_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs0)))) || !(INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs1))) || POINTER_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs1))))) return false; tcode0 = gimple_assign_rhs_code (gs0); tcode1 = gimple_assign_rhs_code (gs1); if (TREE_CODE_CLASS (tcode0) == tcc_comparison && TREE_CODE_CLASS (tcode1) == tcc_comparison) return true; if (TREE_CODE_CLASS (tcode0) == tcc_comparison && ccmp_candidate_p (gs1)) return true; else if (TREE_CODE_CLASS (tcode1) == tcc_comparison && ccmp_candidate_p (gs0)) return true; /* We skip ccmp_candidate_p (gs1) && ccmp_candidate_p (gs0) since there is no way to set the CC flag. */ return false; }
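ccmp_candidate_p accepts a BIT_AND_EXPR or BIT_IOR_EXPR whose two single-use SSA operands are themselves comparisons defined in the same basic block. A hypothetical C function with exactly that shape; on targets that provide the gen_ccmp_first/gen_ccmp_next hooks (AArch64, for instance) it can expand to a CMP/CCMP pair instead of two branches:

#include <assert.h>

/* Both operands of the bitwise AND are tcc_comparison results, each
   used once: the shape ccmp_candidate_p is looking for.  */
static int
in_range (int x, int lo, int hi)
{
  return (x >= lo) & (x <= hi);
}

int
main (void)
{
  assert (in_range (5, 0, 9));
  assert (!in_range (42, 0, 9));
  return 0;
}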
/* Return whether USE_STMT is a floating-point division by DEF. */ static inline bool is_division_by (gimple use_stmt, tree def) { return is_gimple_assign (use_stmt) && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR && gimple_assign_rhs2 (use_stmt) == def /* Do not recognize x / x as valid division, as we are getting confused later by replacing all immediate uses x in such a stmt. */ && gimple_assign_rhs1 (use_stmt) != def; }
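is_division_by is the candidate test used when commoning reciprocals: repeated divisions by the same SSA name can be rewritten as one reciprocal plus multiplications, an unsafe-math transformation. A standalone sketch of the rewrite, with made-up names; the x / x exclusion above keeps the divisor's own use from being counted as a candidate:

#include <assert.h>

/* Under -funsafe-math-optimizations, several x / d become x * (1 / d);
   here the rewrite is done by hand to show the equivalence.  */
static double
normalize3 (double a, double b, double c, double d)
{
  double recip = 1.0 / d;                       /* computed once */
  return a * recip + b * recip + c * recip;     /* instead of a/d + b/d + c/d */
}

int
main (void)
{
  assert (normalize3 (2.0, 4.0, 6.0, 2.0) == 6.0);
  return 0;
}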
static void insert_trap_and_remove_trailing_statements (gimple_stmt_iterator *si_p, tree op) { /* We want the NULL pointer dereference to actually occur so that code that wishes to catch the signal can do so. If the dereference is a load, then there's nothing to do as the LHS will be a throw-away SSA_NAME and the RHS is the NULL dereference. If the dereference is a store and we can easily transform the RHS, then simplify the RHS to enable more DCE. Note that we require the statement to be a GIMPLE_ASSIGN which filters out calls on the RHS. */ gimple stmt = gsi_stmt (*si_p); if (walk_stmt_load_store_ops (stmt, (void *)op, NULL, check_loadstore) && is_gimple_assign (stmt) && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))) { /* We just need to turn the RHS into zero converted to the proper type. */ tree type = TREE_TYPE (gimple_assign_lhs (stmt)); gimple_assign_set_rhs_code (stmt, INTEGER_CST); gimple_assign_set_rhs1 (stmt, fold_convert (type, integer_zero_node)); update_stmt (stmt); } gimple new_stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0); gimple_seq seq = NULL; gimple_seq_add_stmt (&seq, new_stmt); /* If we had a NULL pointer dereference, then we want to insert the __builtin_trap after the statement, for the other cases we want to insert before the statement. */ if (walk_stmt_load_store_ops (stmt, (void *)op, check_loadstore, check_loadstore)) gsi_insert_after (si_p, seq, GSI_NEW_STMT); else gsi_insert_before (si_p, seq, GSI_NEW_STMT); /* We must remove statements from the end of the block so that we never reference a released SSA_NAME. */ basic_block bb = gimple_bb (gsi_stmt (*si_p)); for (gimple_stmt_iterator si = gsi_last_bb (bb); gsi_stmt (si) != gsi_stmt (*si_p); si = gsi_last_bb (bb)) { stmt = gsi_stmt (si); unlink_stmt_vdef (stmt); gsi_remove (&si, true); release_defs (stmt); } }
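insert_trap_and_remove_trailing_statements replaces everything after a known-NULL dereference with a call to __builtin_trap (). A sketch of what that looks like at the source level; the guard keeps this example from actually trapping when run (illustrative only, not the pass itself):

#include <stdio.h>

static void
store_through (int *p, int val)
{
  if (p == NULL)
    {
      /* On the known-NULL path the pass keeps the dereference, appends
         __builtin_trap (), and deletes the trailing statements; here we
         just trap without dereferencing so the example stays runnable.  */
      __builtin_trap ();
    }
  *p = val;
}

int
main (void)
{
  int x;
  store_through (&x, 7);
  printf ("%d\n", x);
  return 0;
}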
static bool can_propagate_from (gimple def_stmt) { use_operand_p use_p; ssa_op_iter iter; gcc_assert (is_gimple_assign (def_stmt)); /* If the rhs has side-effects we cannot propagate from it. */ if (gimple_has_volatile_ops (def_stmt)) return false; /* If the rhs is a load we cannot propagate from it. */ if (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) == tcc_reference || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) == tcc_declaration) return false; /* Constants can always be propagated. */ if (is_gimple_min_invariant (rhs_to_tree (TREE_TYPE (gimple_assign_lhs (def_stmt)), def_stmt))) return true; /* We cannot propagate ssa names that occur in abnormal phi nodes. */ FOR_EACH_SSA_USE_OPERAND (use_p, def_stmt, iter, SSA_OP_USE) if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (USE_FROM_PTR (use_p))) return false; /* If the definition is a conversion of a pointer to a function type, then we cannot apply optimizations as some targets require function pointers to be canonicalized, and in this case this optimization could eliminate a necessary canonicalization. */ if (is_gimple_assign (def_stmt) && (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))) { tree rhs = gimple_assign_rhs1 (def_stmt); if (POINTER_TYPE_P (TREE_TYPE (rhs)) && TREE_CODE (TREE_TYPE (TREE_TYPE (rhs))) == FUNCTION_TYPE) return false; } return true; }
static bool widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt, bool check_sign) { tree dummy; gimple dummy_gimple; loop_vec_info loop_vinfo; stmt_vec_info stmt_vinfo; tree type = TREE_TYPE (name); tree oprnd0; enum vect_def_type dt; tree def; stmt_vinfo = vinfo_for_stmt (use_stmt); loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); if (!vect_is_simple_use (name, loop_vinfo, NULL, def_stmt, &def, &dt)) return false; if (dt != vect_internal_def && dt != vect_external_def && dt != vect_constant_def) return false; if (! *def_stmt) return false; if (!is_gimple_assign (*def_stmt)) return false; if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR) return false; oprnd0 = gimple_assign_rhs1 (*def_stmt); *half_type = TREE_TYPE (oprnd0); if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type) || ((TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type)) && check_sign) || (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2))) return false; if (!vect_is_simple_use (oprnd0, loop_vinfo, NULL, &dummy_gimple, &dummy, &dt)) return false; return true; }
static gimple get_prop_source_stmt (tree name, bool single_use_only, bool *single_use_p) { bool single_use = true; do { gimple def_stmt = SSA_NAME_DEF_STMT (name); if (!has_single_use (name)) { single_use = false; if (single_use_only) return NULL; } /* If name is defined by a PHI node or is the default def, bail out. */ if (!is_gimple_assign (def_stmt)) return NULL; /* If def_stmt is not a simple copy, we possibly found it. */ if (!gimple_assign_ssa_name_copy_p (def_stmt)) { tree rhs; if (!single_use_only && single_use_p) *single_use_p = single_use; /* We can look through pointer conversions in the search for a useful stmt for the comparison folding. */ rhs = gimple_assign_rhs1 (def_stmt); if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)) && TREE_CODE (rhs) == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (gimple_assign_lhs (def_stmt))) && POINTER_TYPE_P (TREE_TYPE (rhs))) name = rhs; else return def_stmt; } else { /* Continue searching the def of the copy source name. */ name = gimple_assign_rhs1 (def_stmt); } } while (1); }
bool may_propagate_copy_into_stmt (gimple dest, tree orig) { tree type_d; tree type_o; /* If the statement is a switch or a single-rhs assignment, then the expression to be replaced by the propagation may be an SSA_NAME. Fortunately, there is an explicit tree for the expression, so we delegate to may_propagate_copy. */ if (gimple_assign_single_p (dest)) return may_propagate_copy (gimple_assign_rhs1 (dest), orig); else if (gimple_code (dest) == GIMPLE_SWITCH) return may_propagate_copy (gimple_switch_index (dest), orig); /* In other cases, the expression is not materialized, so there is no destination to pass to may_propagate_copy. On the other hand, the expression cannot be an SSA_NAME, so the analysis is much simpler. */ if (TREE_CODE (orig) == SSA_NAME && (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig) || TREE_CODE (SSA_NAME_VAR (orig)) == MEMORY_PARTITION_TAG)) return false; if (is_gimple_assign (dest)) type_d = TREE_TYPE (gimple_assign_lhs (dest)); else if (gimple_code (dest) == GIMPLE_COND) type_d = boolean_type_node; else if (is_gimple_call (dest) && gimple_call_lhs (dest) != NULL_TREE) type_d = TREE_TYPE (gimple_call_lhs (dest)); else gcc_unreachable (); type_o = TREE_TYPE (orig); if (!useless_type_conversion_p (type_d, type_o)) return false; return true; }
static tree get_name_for_bit_test (tree candidate) { /* Skip single-use names in favor of using the name from a non-widening conversion definition. */ if (TREE_CODE (candidate) == SSA_NAME && has_single_use (candidate)) { gimple def_stmt = SSA_NAME_DEF_STMT (candidate); if (is_gimple_assign (def_stmt) && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))) { if (TYPE_PRECISION (TREE_TYPE (candidate)) <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))) return gimple_assign_rhs1 (def_stmt); } } return candidate; }
static bool recognize_bits_test (gimple cond, tree *name, tree *bits) { gimple stmt; /* Get at the definition of the result of the bit test. */ if (gimple_cond_code (cond) != NE_EXPR || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME || !integer_zerop (gimple_cond_rhs (cond))) return false; stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond)); if (!is_gimple_assign (stmt) || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR) return false; *name = get_name_for_bit_test (gimple_assign_rhs1 (stmt)); *bits = gimple_assign_rhs2 (stmt); return true; }
static unsigned int slimer_exec(void) { basic_block bb; gimple stmt; gimple_stmt_iterator gsi; if (has_been_processed(cfun->decl)) return 0; if (DECL_EXTERNAL(cfun->decl)) return 0; if (get_identifier(get_name(cfun->decl)) == get_identifier("main")) insert_slimer_init(); /* Go through the basic blocks of this function */ FOR_EACH_BB(bb) for (gsi=gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { stmt = gsi_stmt(gsi); if (is_gimple_call(stmt) || is_gimple_assign(stmt)) { /* If it's a call to a function we added already (junk or some initialization functions), or a function we have previously analyzed, avoid inserting junk data. */ if (is_gimple_call(stmt) && !has_been_processed(gimple_call_fn(stmt))) continue; else if ((max_calls > 0) && ((rand() % 2) == 0)) { insert_call_to_junk_fn(stmt); --max_calls; } } } /* Mark as analyzed so we avoid trying to junkify it again */ VEC_safe_push(tree, gc, analyized_fns, cfun->decl); return 0; }
void propagate_tree_value_into_stmt (gimple_stmt_iterator *gsi, tree val) { gimple stmt = gsi_stmt (*gsi); if (is_gimple_assign (stmt)) { tree expr = NULL_TREE; if (gimple_assign_single_p (stmt)) expr = gimple_assign_rhs1 (stmt); propagate_tree_value (&expr, val); gimple_assign_set_rhs_from_tree (gsi, expr); stmt = gsi_stmt (*gsi); } else if (gimple_code (stmt) == GIMPLE_COND) { tree lhs = NULL_TREE; tree rhs = fold_convert (TREE_TYPE (val), integer_zero_node); propagate_tree_value (&lhs, val); gimple_cond_set_code (stmt, NE_EXPR); gimple_cond_set_lhs (stmt, lhs); gimple_cond_set_rhs (stmt, rhs); } else if (is_gimple_call (stmt) && gimple_call_lhs (stmt) != NULL_TREE) { gimple new_stmt; tree expr = NULL_TREE; propagate_tree_value (&expr, val); new_stmt = gimple_build_assign (gimple_call_lhs (stmt), expr); copy_virtual_operands (new_stmt, stmt); move_ssa_defining_stmt_for_defs (new_stmt, stmt); gsi_replace (gsi, new_stmt, false); } else if (gimple_code (stmt) == GIMPLE_SWITCH) propagate_tree_value (gimple_switch_index_ptr (stmt), val); else gcc_unreachable (); }
static unsigned int ubsan_pass (void) { basic_block bb; gimple_stmt_iterator gsi; FOR_EACH_BB_FN (bb, cfun) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);) { gimple stmt = gsi_stmt (gsi); if (is_gimple_debug (stmt) || gimple_clobber_p (stmt)) { gsi_next (&gsi); continue; } if ((flag_sanitize & SANITIZE_SI_OVERFLOW) && is_gimple_assign (stmt)) instrument_si_overflow (gsi); if (flag_sanitize & SANITIZE_NULL) { if (gimple_store_p (stmt)) instrument_null (gsi, true); if (gimple_assign_load_p (stmt)) instrument_null (gsi, false); } if (flag_sanitize & (SANITIZE_BOOL | SANITIZE_ENUM) && gimple_assign_load_p (stmt)) instrument_bool_enum_load (&gsi); gsi_next (&gsi); } } return 0; }
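For reference, the kind of assignment instrument_si_overflow rewrites, expressed as ordinary user code. Hypothetically compiled with -fsanitize=signed-integer-overflow, the addition below is expanded into an overflow-checked sequence that reports through the ubsan runtime; with in-range inputs it behaves normally:

#include <stdio.h>

static int
add (int a, int b)
{
  return a + b;   /* instrumented when SANITIZE_SI_OVERFLOW is enabled */
}

int
main (void)
{
  printf ("%d\n", add (2, 3));
  return 0;
}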
void propagate_tree_value_into_stmt (gimple_stmt_iterator *gsi, tree val) { gimple stmt = gsi_stmt (*gsi); if (is_gimple_assign (stmt)) { tree expr = NULL_TREE; if (gimple_assign_single_p (stmt)) expr = gimple_assign_rhs1 (stmt); propagate_tree_value (&expr, val); gimple_assign_set_rhs_from_tree (gsi, expr); } else if (gimple_code (stmt) == GIMPLE_COND) { tree lhs = NULL_TREE; tree rhs = build_zero_cst (TREE_TYPE (val)); propagate_tree_value (&lhs, val); gimple_cond_set_code (stmt, NE_EXPR); gimple_cond_set_lhs (stmt, lhs); gimple_cond_set_rhs (stmt, rhs); } else if (is_gimple_call (stmt) && gimple_call_lhs (stmt) != NULL_TREE) { tree expr = NULL_TREE; bool res; propagate_tree_value (&expr, val); res = update_call_from_tree (gsi, expr); gcc_assert (res); } else if (gimple_code (stmt) == GIMPLE_SWITCH) propagate_tree_value (gimple_switch_index_ptr (stmt), val); else gcc_unreachable (); }
static void restrict_range_to_consts() { size_t i; unsigned num_vr_values = num_ssa_names; for (i = 0; i < num_vr_values; i++) if (vr_value[i]) { value_range_t *vr = vr_value[i]; tree type = TREE_TYPE (ssa_name(i)); tree minimum = NULL; tree maximum = NULL; unsigned var_prec = TYPE_PRECISION(type); //fprintf(stderr, "%ld\n", i); if (INTEGRAL_TYPE_P(type) && vr->min && vr->max) { bool is_neg_inf = is_negative_overflow_infinity (vr->min) || (INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type) && vrp_val_is_min (vr->min)); bool is_pos_inf = is_positive_overflow_infinity (vr->max) || (INTEGRAL_TYPE_P (type) && vrp_val_is_max (vr->max)); if(TREE_CODE (vr->min) != INTEGER_CST && !is_neg_inf) { /// check if greater than zero bool strict_overflow_p; tree val = compare_name_with_value(GE_EXPR, ssa_name(i), integer_zero_node, &strict_overflow_p); if(!strict_overflow_p && val) { if(integer_onep (val)) { minimum = integer_zero_node; } else { tree neg_const; unsigned prec_index = 1; while(prec_index < var_prec && !strict_overflow_p) { neg_const = build_int_cst (type, -(((unsigned HOST_WIDE_INT)1) << prec_index)); tree val = compare_name_with_value(GE_EXPR, ssa_name(i), neg_const, &strict_overflow_p); if(val && integer_onep (val)) { minimum = neg_const; break; } ++prec_index; } } } } else if(is_neg_inf) minimum = vr->min; if(TREE_CODE (vr->max) != INTEGER_CST && !is_pos_inf) { bool strict_overflow_p=false; tree pos_const; unsigned prec_index = 0; while(prec_index < var_prec && !strict_overflow_p) { pos_const = build_int_cst (type, (((unsigned HOST_WIDE_INT)1) << prec_index)); tree val = compare_name_with_value(LT_EXPR, ssa_name(i), pos_const, &strict_overflow_p); if(val && integer_onep (val)) { maximum = build_int_cst (type, (((unsigned HOST_WIDE_INT)1) << prec_index)-1); break; } ++prec_index; } } else if(is_pos_inf) maximum = vr->max; if(minimum) { vr->min = minimum; vr->type = VR_RANGE; } if(maximum) { vr->max = maximum; vr->type = VR_RANGE; } } } // do further restrictions by exploiting assert_expr for (i = 0; i < num_vr_values; i++) if (vr_value[i]) { tree type = TREE_TYPE (ssa_name(i)); value_range_t *vr = vr_value[i]; if(INTEGRAL_TYPE_P(type) && vr->type == VR_RANGE && vr->min && vr->max) { tree sa_var = ssa_name(i); GIMPLE_type def_stmt = SSA_NAME_DEF_STMT (sa_var ); if(is_gimple_assign (def_stmt) && gimple_assign_rhs_code (def_stmt) == ASSERT_EXPR) { tree src_var = ASSERT_EXPR_VAR (gimple_assign_rhs1 (def_stmt)); value_range_t *src_vr = vr_value[SSA_NAME_VERSION(src_var)]; if(src_vr && src_vr->type == VR_RANGE && src_vr->min && src_vr->max) { bool strict_overflow_p=false; tree val = compare_name_with_value(LT_EXPR, src_var, vr->max, &strict_overflow_p); if(val && integer_onep (val)) vr->max = src_vr->max; strict_overflow_p=false; val = compare_name_with_value(GT_EXPR, src_var, vr->min, &strict_overflow_p); if(val && integer_onep (val)) vr->min = src_vr->min; } } } } }
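The power-of-two probing in restrict_range_to_consts can be read as a small search loop: ask "is the value < 2^k" for growing k and take 2^k - 1 as the constant maximum. A standalone sketch where the value itself stands in for the compare_name_with_value oracle (invented names, illustrative only):

#include <assert.h>

static long
const_upper_bound (unsigned long value, unsigned prec)
{
  /* Probe successively larger powers of two until "value < 2^k" first
     holds, mirroring the prec_index loop in the pass.  */
  for (unsigned k = 0; k < prec; k++)
    if (value < (1UL << k))
      return (long) ((1UL << k) - 1);
  return -1;   /* no constant bound found */
}

int
main (void)
{
  assert (const_upper_bound (5, 32) == 7);    /* 5 < 8,  so max = 7  */
  assert (const_upper_bound (8, 32) == 15);   /* 8 < 16, so max = 15 */
  return 0;
}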
static bool recognize_single_bit_test (gimple cond, tree *name, tree *bit) { gimple stmt; /* Get at the definition of the result of the bit test. */ if (gimple_cond_code (cond) != NE_EXPR || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME || !integer_zerop (gimple_cond_rhs (cond))) return false; stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond)); if (!is_gimple_assign (stmt)) return false; /* Look at which bit is tested. One form to recognize is D.1985_5 = state_3(D) >> control1_4(D); D.1986_6 = (int) D.1985_5; D.1987_7 = op0 & 1; if (D.1987_7 != 0) */ if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR && integer_onep (gimple_assign_rhs2 (stmt)) && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME) { tree orig_name = gimple_assign_rhs1 (stmt); /* Look through copies and conversions to eventually find the stmt that computes the shift. */ stmt = SSA_NAME_DEF_STMT (orig_name); while (is_gimple_assign (stmt) && ((CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt)) && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (stmt))) <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt))))) || gimple_assign_ssa_name_copy_p (stmt))) stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); /* If we found such, decompose it. */ if (is_gimple_assign (stmt) && gimple_assign_rhs_code (stmt) == RSHIFT_EXPR) { /* op0 & (1 << op1) */ *bit = gimple_assign_rhs2 (stmt); *name = gimple_assign_rhs1 (stmt); } else { /* t & 1 */ *bit = integer_zero_node; *name = get_name_for_bit_test (orig_name); } return true; } /* Another form is D.1987_7 = op0 & (1 << CST) if (D.1987_7 != 0) */ if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME && integer_pow2p (gimple_assign_rhs2 (stmt))) { *name = gimple_assign_rhs1 (stmt); *bit = build_int_cst (integer_type_node, tree_log2 (gimple_assign_rhs2 (stmt))); return true; } /* Another form is D.1986_6 = 1 << control1_4(D) D.1987_7 = op0 & D.1986_6 if (D.1987_7 != 0) */ if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME) { gimple tmp; /* Both arguments of the BIT_AND_EXPR can be the single-bit specifying expression. */ tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); if (is_gimple_assign (tmp) && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR && integer_onep (gimple_assign_rhs1 (tmp))) { *name = gimple_assign_rhs2 (stmt); *bit = gimple_assign_rhs2 (tmp); return true; } tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt)); if (is_gimple_assign (tmp) && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR && integer_onep (gimple_assign_rhs1 (tmp))) { *name = gimple_assign_rhs1 (stmt); *bit = gimple_assign_rhs2 (tmp); return true; } } return false; }
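The three source shapes recognize_single_bit_test decomposes are easy to see in plain C; each tests whether a given bit of x is set (standalone, with illustrative constants):

#include <assert.h>

static int form1 (unsigned x, unsigned b) { return ((x >> b) & 1) != 0; }  /* shift then mask */
static int form2 (unsigned x)             { return (x & (1u << 4)) != 0; } /* constant power of two */
static int form3 (unsigned x, unsigned b) { return (x & (1u << b)) != 0; } /* variable 1 << b */

int
main (void)
{
  assert (form1 (0x10, 4) && form2 (0x10) && form3 (0x10, 4));
  assert (!form1 (0x08, 4) && !form2 (0x08) && !form3 (0x08, 4));
  return 0;
}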
static unsigned int tree_nrv (void) { tree result = DECL_RESULT (current_function_decl); tree result_type = TREE_TYPE (result); tree found = NULL; basic_block bb; gimple_stmt_iterator gsi; struct nrv_data data; /* If this function does not return an aggregate type in memory, then there is nothing to do. */ if (!aggregate_value_p (result, current_function_decl)) return 0; /* If a GIMPLE type is returned in memory, finalize_nrv_r might create non-GIMPLE. */ if (is_gimple_reg_type (result_type)) return 0; /* Look through each block for assignments to the RESULT_DECL. */ FOR_EACH_BB (bb) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); tree ret_val; if (gimple_code (stmt) == GIMPLE_RETURN) { /* In a function with an aggregate return value, the gimplifier has changed all non-empty RETURN_EXPRs to return the RESULT_DECL. */ ret_val = gimple_return_retval (stmt); if (ret_val) gcc_assert (ret_val == result); } else if (is_gimple_assign (stmt) && gimple_assign_lhs (stmt) == result) { tree rhs; if (!gimple_assign_copy_p (stmt)) return 0; rhs = gimple_assign_rhs1 (stmt); /* Now verify that this return statement uses the same value as any previously encountered return statement. */ if (found != NULL) { /* If we found a return statement using a different variable than previous return statements, then we can not perform NRV optimizations. */ if (found != rhs) return 0; } else found = rhs; /* The returned value must be a local automatic variable of the same type and alignment as the function's result. */ if (TREE_CODE (found) != VAR_DECL || TREE_THIS_VOLATILE (found) || DECL_CONTEXT (found) != current_function_decl || TREE_STATIC (found) || TREE_ADDRESSABLE (found) || DECL_ALIGN (found) > DECL_ALIGN (result) || !useless_type_conversion_p (result_type, TREE_TYPE (found))) return 0; } else if (is_gimple_assign (stmt)) { tree addr = get_base_address (gimple_assign_lhs (stmt)); /* If there's any MODIFY of component of RESULT, then bail out. */ if (addr && addr == result) return 0; } } } if (!found) return 0; /* If dumping details, then note once and only the NRV replacement. */ if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "NRV Replaced: "); print_generic_expr (dump_file, found, dump_flags); fprintf (dump_file, " with: "); print_generic_expr (dump_file, result, dump_flags); fprintf (dump_file, "\n"); } /* At this point we know that all the return statements return the same local which has suitable attributes for NRV. Copy debugging information from FOUND to RESULT. */ DECL_NAME (result) = DECL_NAME (found); DECL_SOURCE_LOCATION (result) = DECL_SOURCE_LOCATION (found); DECL_ABSTRACT_ORIGIN (result) = DECL_ABSTRACT_ORIGIN (found); TREE_ADDRESSABLE (result) = TREE_ADDRESSABLE (found); /* Now walk through the function changing all references to VAR to be RESULT. */ data.var = found; data.result = result; FOR_EACH_BB (bb) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); ) { gimple stmt = gsi_stmt (gsi); /* If this is a copy from VAR to RESULT, remove it. */ if (gimple_assign_copy_p (stmt) && gimple_assign_lhs (stmt) == result && gimple_assign_rhs1 (stmt) == found) gsi_remove (&gsi, true); else { struct walk_stmt_info wi; memset (&wi, 0, sizeof (wi)); wi.info = &data; walk_gimple_op (stmt, finalize_nrv_r, &wi); gsi_next (&gsi); } } } /* FOUND is no longer used. Ensure it gets removed. */ var_ann (found)->used = 0; return 0; }
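At the source level, the named-return-value shape tree_nrv looks for is a single local automatic variable that every return statement returns; the local is then replaced by the hidden return slot and the final copy deleted. A minimal sketch with an invented struct:

#include <stdio.h>
#include <string.h>

struct big { char data[128]; };

static struct big
make_big (int fill)
{
  struct big local;                         /* FOUND: the NRV candidate */
  memset (local.data, fill, sizeof local.data);
  return local;                             /* conceptually: return <retval> */
}

int
main (void)
{
  struct big b = make_big ('x');
  printf ("%c\n", b.data[0]);
  return 0;
}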
/* Expand conditional compare gimple G. A typical CCMP sequence is like: CC0 = CMP (a, b); CC1 = CCMP (NE (CC0, 0), CMP (e, f)); ... CCn = CCMP (NE (CCn-1, 0), CMP (...)); hook gen_ccmp_first is used to expand the first compare. hook gen_ccmp_next is used to expand the following CCMP. PREP_SEQ returns all insns to prepare operands. GEN_SEQ returns all compare insns. */ static rtx expand_ccmp_expr_1 (gimple *g, rtx_insn **prep_seq, rtx_insn **gen_seq) { tree exp = gimple_assign_rhs_to_tree (g); tree_code code = TREE_CODE (exp); gimple *gs0 = get_gimple_for_ssa_name (TREE_OPERAND (exp, 0)); gimple *gs1 = get_gimple_for_ssa_name (TREE_OPERAND (exp, 1)); rtx tmp; tree_code code0 = gimple_assign_rhs_code (gs0); tree_code code1 = gimple_assign_rhs_code (gs1); gcc_assert (code == BIT_AND_EXPR || code == BIT_IOR_EXPR); gcc_assert (gs0 && gs1 && is_gimple_assign (gs0) && is_gimple_assign (gs1)); if (TREE_CODE_CLASS (code0) == tcc_comparison) { if (TREE_CODE_CLASS (code1) == tcc_comparison) { int unsignedp0, unsignedp1; rtx_code rcode0, rcode1; int speed_p = optimize_insn_for_speed_p (); rtx tmp2 = NULL_RTX, ret = NULL_RTX, ret2 = NULL_RTX; unsigned cost1 = MAX_COST; unsigned cost2 = MAX_COST; unsignedp0 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (gs0))); unsignedp1 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (gs1))); rcode0 = get_rtx_code (code0, unsignedp0); rcode1 = get_rtx_code (code1, unsignedp1); rtx_insn *prep_seq_1, *gen_seq_1; tmp = targetm.gen_ccmp_first (&prep_seq_1, &gen_seq_1, rcode0, gimple_assign_rhs1 (gs0), gimple_assign_rhs2 (gs0)); if (tmp != NULL) { ret = expand_ccmp_next (gs1, code, tmp, &prep_seq_1, &gen_seq_1); cost1 = seq_cost (prep_seq_1, speed_p); cost1 += seq_cost (gen_seq_1, speed_p); } /* FIXME: Temporary workaround for PR69619. Avoid exponential compile time due to expanding gs0 and gs1 twice. If gs0 and gs1 are complex, the cost will be high, so avoid reevaluation if above an arbitrary threshold. */ rtx_insn *prep_seq_2, *gen_seq_2; if (tmp == NULL || cost1 < COSTS_N_INSNS (25)) tmp2 = targetm.gen_ccmp_first (&prep_seq_2, &gen_seq_2, rcode1, gimple_assign_rhs1 (gs1), gimple_assign_rhs2 (gs1)); if (!tmp && !tmp2) return NULL_RTX; if (tmp2 != NULL) { ret2 = expand_ccmp_next (gs0, code, tmp2, &prep_seq_2, &gen_seq_2); cost2 = seq_cost (prep_seq_2, speed_p); cost2 += seq_cost (gen_seq_2, speed_p); } if (cost2 < cost1) { *prep_seq = prep_seq_2; *gen_seq = gen_seq_2; return ret2; } *prep_seq = prep_seq_1; *gen_seq = gen_seq_1; return ret; } else { tmp = expand_ccmp_expr_1 (gs1, prep_seq, gen_seq); if (!tmp) return NULL_RTX; return expand_ccmp_next (gs0, code, tmp, prep_seq, gen_seq); } } else { gcc_assert (gimple_assign_rhs_code (gs0) == BIT_AND_EXPR || gimple_assign_rhs_code (gs0) == BIT_IOR_EXPR); if (TREE_CODE_CLASS (gimple_assign_rhs_code (gs1)) == tcc_comparison) { tmp = expand_ccmp_expr_1 (gs0, prep_seq, gen_seq); if (!tmp) return NULL_RTX; return expand_ccmp_next (gs1, code, tmp, prep_seq, gen_seq); } else { gcc_assert (gimple_assign_rhs_code (gs1) == BIT_AND_EXPR || gimple_assign_rhs_code (gs1) == BIT_IOR_EXPR); } } return NULL_RTX; }
static gimple vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out) { gimple stmt; tree oprnd0, oprnd1; stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt); tree type, half_type; gimple pattern_stmt; loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo); struct loop *loop = LOOP_VINFO_LOOP (loop_info); tree var; if (!is_gimple_assign (last_stmt)) return NULL; type = gimple_expr_type (last_stmt); /* Look for the following pattern DX = (TYPE) X; sum_1 = DX + sum_0; In which DX is at least double the size of X, and sum_1 has been recognized as a reduction variable. */ /* Starting from LAST_STMT, follow the defs of its uses in search of the above pattern. */ if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR) return NULL; if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def) return NULL; oprnd0 = gimple_assign_rhs1 (last_stmt); oprnd1 = gimple_assign_rhs2 (last_stmt); if (!types_compatible_p (TREE_TYPE (oprnd0), type) || !types_compatible_p (TREE_TYPE (oprnd1), type)) return NULL; /* So far so good. Since last_stmt was detected as a (summation) reduction, we know that oprnd1 is the reduction variable (defined by a loop-header phi), and oprnd0 is an ssa-name defined by a stmt in the loop body. Left to check that oprnd0 is defined by a cast from type 'type' to type 'TYPE'. */ if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt)) return NULL; oprnd0 = gimple_assign_rhs1 (stmt); *type_in = half_type; *type_out = type; /* Pattern detected. Create a stmt to be used to replace the pattern: */ var = vect_recog_temp_ssa_var (type, NULL); pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var, oprnd0, oprnd1); SSA_NAME_DEF_STMT (var) = pattern_stmt; if (vect_print_dump_info (REPORT_DETAILS)) { fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: "); print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM); } /* We don't allow changing the order of the computation in the inner-loop when doing outer-loop vectorization. */ gcc_assert (!nested_in_vect_loop_p (loop, last_stmt)); return pattern_stmt; }
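The widening-summation source pattern the recognizer matches, as a runnable sketch: each element is widened (here short to int) before being accumulated into the reduction variable:

#include <assert.h>

static int
widen_sum (const short *x, int n)
{
  int sum = 0;
  for (int i = 0; i < n; i++)
    sum += (int) x[i];   /* DX = (TYPE) X;  sum_1 = DX + sum_0;  */
  return sum;
}

int
main (void)
{
  short v[4] = { 1, 2, 3, 4 };
  assert (widen_sum (v, 4) == 10);
  return 0;
}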
static gimple vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out) { gimple stmt; tree oprnd0, oprnd1; tree oprnd00, oprnd01; stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt); tree type, half_type; gimple pattern_stmt; tree prod_type; loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo); struct loop *loop = LOOP_VINFO_LOOP (loop_info); tree var; if (!is_gimple_assign (last_stmt)) return NULL; type = gimple_expr_type (last_stmt); /* Look for the following pattern DX = (TYPE1) X; DY = (TYPE1) Y; DPROD = DX * DY; DDPROD = (TYPE2) DPROD; sum_1 = DDPROD + sum_0; In which - DX is double the size of X - DY is double the size of Y - DX, DY, DPROD all have the same type - sum is the same size of DPROD or bigger - sum has been recognized as a reduction variable. This is equivalent to: DPROD = X w* Y; #widen mult sum_1 = DPROD w+ sum_0; #widen summation or DPROD = X w* Y; #widen mult sum_1 = DPROD + sum_0; #summation */ /* Starting from LAST_STMT, follow the defs of its uses in search of the above pattern. */ if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR) return NULL; if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo)) { /* Has been detected as widening-summation? */ stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo); type = gimple_expr_type (stmt); if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR) return NULL; oprnd0 = gimple_assign_rhs1 (stmt); oprnd1 = gimple_assign_rhs2 (stmt); half_type = TREE_TYPE (oprnd0); } else { gimple def_stmt; if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def) return NULL; oprnd0 = gimple_assign_rhs1 (last_stmt); oprnd1 = gimple_assign_rhs2 (last_stmt); if (!types_compatible_p (TREE_TYPE (oprnd0), type) || !types_compatible_p (TREE_TYPE (oprnd1), type)) return NULL; stmt = last_stmt; if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt)) { stmt = def_stmt; oprnd0 = gimple_assign_rhs1 (stmt); } else half_type = type; } /* So far so good. Since last_stmt was detected as a (summation) reduction, we know that oprnd1 is the reduction variable (defined by a loop-header phi), and oprnd0 is an ssa-name defined by a stmt in the loop body. Left to check that oprnd0 is defined by a (widen_)mult_expr */ prod_type = half_type; stmt = SSA_NAME_DEF_STMT (oprnd0); /* It could not be the dot_prod pattern if the stmt is outside the loop. */ if (!gimple_bb (stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (stmt))) return NULL; /* FORNOW. Can continue analyzing the def-use chain when this stmt in a phi inside the loop (in case we are analyzing an outer-loop). */ if (!is_gimple_assign (stmt)) return NULL; stmt_vinfo = vinfo_for_stmt (stmt); gcc_assert (stmt_vinfo); if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def) return NULL; if (gimple_assign_rhs_code (stmt) != MULT_EXPR) return NULL; if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo)) { /* Has been detected as a widening multiplication? 
*/ stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo); if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR) return NULL; stmt_vinfo = vinfo_for_stmt (stmt); gcc_assert (stmt_vinfo); gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def); oprnd00 = gimple_assign_rhs1 (stmt); oprnd01 = gimple_assign_rhs2 (stmt); } else { tree half_type0, half_type1; gimple def_stmt; tree oprnd0, oprnd1; oprnd0 = gimple_assign_rhs1 (stmt); oprnd1 = gimple_assign_rhs2 (stmt); if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type) || !types_compatible_p (TREE_TYPE (oprnd1), prod_type)) return NULL; if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt)) return NULL; oprnd00 = gimple_assign_rhs1 (def_stmt); if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt)) return NULL; oprnd01 = gimple_assign_rhs1 (def_stmt); if (!types_compatible_p (half_type0, half_type1)) return NULL; if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2) return NULL; } half_type = TREE_TYPE (oprnd00); *type_in = half_type; *type_out = type; /* Pattern detected. Create a stmt to be used to replace the pattern: */ var = vect_recog_temp_ssa_var (type, NULL); pattern_stmt = gimple_build_assign_with_ops3 (DOT_PROD_EXPR, var, oprnd00, oprnd01, oprnd1); if (vect_print_dump_info (REPORT_DETAILS)) { fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: "); print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM); } /* We don't allow changing the order of the computation in the inner-loop when doing outer-loop vectorization. */ gcc_assert (!nested_in_vect_loop_p (loop, last_stmt)); return pattern_stmt; }
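And the dot-product source pattern matched above: multiply two narrow operands, widen the product, and accumulate into a wider reduction (runnable sketch):

#include <assert.h>

static int
dot_prod (const short *a, const short *b, int n)
{
  int sum = 0;
  for (int i = 0; i < n; i++)
    sum += (int) a[i] * (int) b[i];  /* DPROD = X w* Y; sum_1 = DPROD w+ sum_0 */
  return sum;
}

int
main (void)
{
  short a[3] = { 1, 2, 3 }, b[3] = { 4, 5, 6 };
  assert (dot_prod (a, b, 3) == 32);
  return 0;
}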
static gimple input_gimple_stmt (struct lto_input_block *ib, struct data_in *data_in, struct function *fn, enum LTO_tags tag) { gimple stmt; enum gimple_code code; unsigned HOST_WIDE_INT num_ops; size_t i; struct bitpack_d bp; code = lto_tag_to_gimple_code (tag); /* Read the tuple header. */ bp = streamer_read_bitpack (ib); num_ops = bp_unpack_var_len_unsigned (&bp); stmt = gimple_alloc (code, num_ops); stmt->gsbase.no_warning = bp_unpack_value (&bp, 1); if (is_gimple_assign (stmt)) stmt->gsbase.nontemporal_move = bp_unpack_value (&bp, 1); stmt->gsbase.has_volatile_ops = bp_unpack_value (&bp, 1); stmt->gsbase.subcode = bp_unpack_var_len_unsigned (&bp); /* Read location information. */ gimple_set_location (stmt, lto_input_location (ib, data_in)); /* Read lexical block reference. */ gimple_set_block (stmt, stream_read_tree (ib, data_in)); /* Read in all the operands. */ switch (code) { case GIMPLE_RESX: gimple_resx_set_region (stmt, streamer_read_hwi (ib)); break; case GIMPLE_EH_MUST_NOT_THROW: gimple_eh_must_not_throw_set_fndecl (stmt, stream_read_tree (ib, data_in)); break; case GIMPLE_EH_DISPATCH: gimple_eh_dispatch_set_region (stmt, streamer_read_hwi (ib)); break; case GIMPLE_ASM: { /* FIXME lto. Move most of this into a new gimple_asm_set_string(). */ tree str; stmt->gimple_asm.ni = streamer_read_uhwi (ib); stmt->gimple_asm.no = streamer_read_uhwi (ib); stmt->gimple_asm.nc = streamer_read_uhwi (ib); stmt->gimple_asm.nl = streamer_read_uhwi (ib); str = streamer_read_string_cst (data_in, ib); stmt->gimple_asm.string = TREE_STRING_POINTER (str); } /* Fallthru */ case GIMPLE_ASSIGN: case GIMPLE_CALL: case GIMPLE_RETURN: case GIMPLE_SWITCH: case GIMPLE_LABEL: case GIMPLE_COND: case GIMPLE_GOTO: case GIMPLE_DEBUG: for (i = 0; i < num_ops; i++) { tree op = stream_read_tree (ib, data_in); gimple_set_op (stmt, i, op); if (!op) continue; /* Fixup FIELD_DECLs in COMPONENT_REFs; they are not handled by decl merging. */ if (TREE_CODE (op) == ADDR_EXPR) op = TREE_OPERAND (op, 0); while (handled_component_p (op)) { if (TREE_CODE (op) == COMPONENT_REF) { tree field, type, tem; tree closest_match = NULL_TREE; field = TREE_OPERAND (op, 1); type = DECL_CONTEXT (field); for (tem = TYPE_FIELDS (type); tem; tem = TREE_CHAIN (tem)) { if (TREE_CODE (tem) != FIELD_DECL) continue; if (tem == field) break; if (DECL_NONADDRESSABLE_P (tem) == DECL_NONADDRESSABLE_P (field) && gimple_compare_field_offset (tem, field)) { if (types_compatible_p (TREE_TYPE (tem), TREE_TYPE (field))) break; else closest_match = tem; } } /* In case of type mismatches across units we can fail to unify some types and thus not find a proper field-decl here. */ if (tem == NULL_TREE) { /* Thus, emit an ODR violation warning. */ if (warning_at (gimple_location (stmt), 0, "use of type %<%E%> with two mismatching " "declarations at field %<%E%>", type, TREE_OPERAND (op, 1))) { if (TYPE_FIELDS (type)) inform (DECL_SOURCE_LOCATION (TYPE_FIELDS (type)), "original type declared here"); inform (DECL_SOURCE_LOCATION (TREE_OPERAND (op, 1)), "field in mismatching type declared here"); if (TYPE_NAME (TREE_TYPE (field)) && (TREE_CODE (TYPE_NAME (TREE_TYPE (field))) == TYPE_DECL)) inform (DECL_SOURCE_LOCATION (TYPE_NAME (TREE_TYPE (field))), "type of field declared here"); if (closest_match && TYPE_NAME (TREE_TYPE (closest_match)) && (TREE_CODE (TYPE_NAME (TREE_TYPE (closest_match))) == TYPE_DECL)) inform (DECL_SOURCE_LOCATION (TYPE_NAME (TREE_TYPE (closest_match))), "type of mismatching field declared here"); } /* And finally fixup the types. 
*/ TREE_OPERAND (op, 0) = build1 (VIEW_CONVERT_EXPR, type, TREE_OPERAND (op, 0)); } else TREE_OPERAND (op, 1) = tem; } op = TREE_OPERAND (op, 0); } } if (is_gimple_call (stmt)) { if (gimple_call_internal_p (stmt)) gimple_call_set_internal_fn (stmt, streamer_read_enum (ib, internal_fn, IFN_LAST)); else gimple_call_set_fntype (stmt, stream_read_tree (ib, data_in)); } break; case GIMPLE_NOP: case GIMPLE_PREDICT: break; case GIMPLE_TRANSACTION: gimple_transaction_set_label (stmt, stream_read_tree (ib, data_in)); break; default: internal_error ("bytecode stream: unknown GIMPLE statement tag %s", lto_tag_name (tag)); } /* Update the properties of symbols, SSA names and labels associated with STMT. */ if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL) { tree lhs = gimple_get_lhs (stmt); if (lhs && TREE_CODE (lhs) == SSA_NAME) SSA_NAME_DEF_STMT (lhs) = stmt; } else if (code == GIMPLE_LABEL) gcc_assert (emit_label_in_global_context_p (gimple_label_label (stmt)) || DECL_CONTEXT (gimple_label_label (stmt)) == fn->decl); else if (code == GIMPLE_ASM) { unsigned i; for (i = 0; i < gimple_asm_noutputs (stmt); i++) { tree op = TREE_VALUE (gimple_asm_output_op (stmt, i)); if (TREE_CODE (op) == SSA_NAME) SSA_NAME_DEF_STMT (op) = stmt; } } /* Reset alias information. */ if (code == GIMPLE_CALL) gimple_call_reset_alias_info (stmt); /* Mark the statement modified so its operand vectors can be filled in. */ gimple_set_modified (stmt, true); return stmt; }
static bool generate_builtin (struct loop *loop, bitmap partition, bool copy_p) { bool res = false; unsigned i, x = 0; basic_block *bbs; gimple write = NULL; tree op0, op1; gimple_stmt_iterator bsi; tree nb_iter = number_of_exit_cond_executions (loop); if (!nb_iter || nb_iter == chrec_dont_know) return false; bbs = get_loop_body_in_dom_order (loop); for (i = 0; i < loop->num_nodes; i++) { basic_block bb = bbs[i]; for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi)) x++; for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi)) { gimple stmt = gsi_stmt (bsi); if (bitmap_bit_p (partition, x++) && is_gimple_assign (stmt) && !is_gimple_reg (gimple_assign_lhs (stmt))) { /* Don't generate the builtins when there is more than one memory write. */ if (write != NULL) goto end; write = stmt; } } } if (!write) goto end; op0 = gimple_assign_lhs (write); op1 = gimple_assign_rhs1 (write); if (!(TREE_CODE (op0) == ARRAY_REF || TREE_CODE (op0) == INDIRECT_REF)) goto end; /* The new statements will be placed before LOOP. */ bsi = gsi_last_bb (loop_preheader_edge (loop)->src); if (gimple_assign_rhs_code (write) == INTEGER_CST && (integer_zerop (op1) || real_zerop (op1))) res = generate_memset_zero (write, op0, nb_iter, bsi); /* If this is the last partition for which we generate code, we have to destroy the loop. */ if (res && !copy_p) { unsigned nbbs = loop->num_nodes; basic_block src = loop_preheader_edge (loop)->src; basic_block dest = single_exit (loop)->dest; prop_phis (dest); make_edge (src, dest, EDGE_FALLTHRU); cancel_loop_tree (loop); for (i = 0; i < nbbs; i++) delete_basic_block (bbs[i]); set_immediate_dominator (CDI_DOMINATORS, dest, recompute_dominator (CDI_DOMINATORS, dest)); } end: free (bbs); return res; }
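generate_builtin only fires for a partition whose sole memory write stores zero; generate_memset_zero then turns the loop into a library call. A source-level sketch of the before/after, with invented names:

#include <assert.h>
#include <string.h>

static void
zero_loop (char *buf, unsigned n)
{
  /* A single store of the constant 0 per iteration; the distributed
     partition becomes memset (buf, 0, n).  */
  for (unsigned i = 0; i < n; i++)
    buf[i] = 0;
}

int
main (void)
{
  char buf[16];
  memset (buf, 0xff, sizeof buf);
  zero_loop (buf, sizeof buf);
  assert (buf[0] == 0 && buf[15] == 0);
  return 0;
}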
/* Expand conditional compare gimple G. A typical CCMP sequence is like: CC0 = CMP (a, b); CC1 = CCMP (NE (CC0, 0), CMP (e, f)); ... CCn = CCMP (NE (CCn-1, 0), CMP (...)); hook gen_ccmp_first is used to expand the first compare. hook gen_ccmp_next is used to expand the following CCMP. PREP_SEQ returns all insns to prepare operands. GEN_SEQ returns all compare insns. */ static rtx expand_ccmp_expr_1 (gimple *g, rtx *prep_seq, rtx *gen_seq) { tree exp = gimple_assign_rhs_to_tree (g); enum tree_code code = TREE_CODE (exp); gimple *gs0 = get_gimple_for_ssa_name (TREE_OPERAND (exp, 0)); gimple *gs1 = get_gimple_for_ssa_name (TREE_OPERAND (exp, 1)); rtx tmp; enum tree_code code0 = gimple_assign_rhs_code (gs0); enum tree_code code1 = gimple_assign_rhs_code (gs1); gcc_assert (code == BIT_AND_EXPR || code == BIT_IOR_EXPR); gcc_assert (gs0 && gs1 && is_gimple_assign (gs0) && is_gimple_assign (gs1)); if (TREE_CODE_CLASS (code0) == tcc_comparison) { if (TREE_CODE_CLASS (code1) == tcc_comparison) { int unsignedp0; enum rtx_code rcode0; unsignedp0 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (gs0))); rcode0 = get_rtx_code (code0, unsignedp0); tmp = targetm.gen_ccmp_first (prep_seq, gen_seq, rcode0, gimple_assign_rhs1 (gs0), gimple_assign_rhs2 (gs0)); if (!tmp) return NULL_RTX; return expand_ccmp_next (gs1, code, tmp, prep_seq, gen_seq); } else { tmp = expand_ccmp_expr_1 (gs1, prep_seq, gen_seq); if (!tmp) return NULL_RTX; return expand_ccmp_next (gs0, code, tmp, prep_seq, gen_seq); } } else { gcc_assert (gimple_assign_rhs_code (gs0) == BIT_AND_EXPR || gimple_assign_rhs_code (gs0) == BIT_IOR_EXPR); if (TREE_CODE_CLASS (gimple_assign_rhs_code (gs1)) == tcc_comparison) { tmp = expand_ccmp_expr_1 (gs0, prep_seq, gen_seq); if (!tmp) return NULL_RTX; return expand_ccmp_next (gs1, code, tmp, prep_seq, gen_seq); } else { gcc_assert (gimple_assign_rhs_code (gs1) == BIT_AND_EXPR || gimple_assign_rhs_code (gs1) == BIT_IOR_EXPR); } } return NULL_RTX; }
static void vect_pattern_recog_1 ( gimple (* vect_recog_func) (gimple, tree *, tree *), gimple_stmt_iterator si) { gimple stmt = gsi_stmt (si), pattern_stmt; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); stmt_vec_info pattern_stmt_info; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); tree pattern_vectype; tree type_in, type_out; enum tree_code code; int i; gimple next; pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out); if (!pattern_stmt) return; if (VECTOR_MODE_P (TYPE_MODE (type_in))) { /* No need to check target support (already checked by the pattern recognition function). */ if (type_out) gcc_assert (VECTOR_MODE_P (TYPE_MODE (type_out))); pattern_vectype = type_out ? type_out : type_in; } else { enum machine_mode vec_mode; enum insn_code icode; optab optab; /* Check target support */ type_in = get_vectype_for_scalar_type (type_in); if (!type_in) return; if (type_out) type_out = get_vectype_for_scalar_type (type_out); else type_out = type_in; if (!type_out) return; pattern_vectype = type_out; if (is_gimple_assign (pattern_stmt)) code = gimple_assign_rhs_code (pattern_stmt); else { gcc_assert (is_gimple_call (pattern_stmt)); code = CALL_EXPR; } optab = optab_for_tree_code (code, type_in, optab_default); vec_mode = TYPE_MODE (type_in); if (!optab || (icode = optab_handler (optab, vec_mode)) == CODE_FOR_nothing || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out))) return; } /* Found a vectorizable pattern. */ if (vect_print_dump_info (REPORT_DETAILS)) { fprintf (vect_dump, "pattern recognized: "); print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM); } /* Mark the stmts that are involved in the pattern. */ gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT); set_vinfo_for_stmt (pattern_stmt, new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL)); pattern_stmt_info = vinfo_for_stmt (pattern_stmt); STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt; STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info); STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype; STMT_VINFO_IN_PATTERN_P (stmt_info) = true; STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt; /* Patterns cannot be vectorized using SLP, because they change the order of computation. */ FOR_EACH_VEC_ELT (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i, next) if (next == stmt) VEC_ordered_remove (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i); }
static inline bool is_replaceable_p (gimple stmt) { use_operand_p use_p; tree def; gimple use_stmt; location_t locus1, locus2; tree block1, block2; /* Only consider modify stmts. */ if (!is_gimple_assign (stmt)) return false; /* If the statement may throw an exception, it cannot be replaced. */ if (stmt_could_throw_p (stmt)) return false; /* Punt if there is more than 1 def. */ def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF); if (!def) return false; /* Only consider definitions which have a single use. */ if (!single_imm_use (def, &use_p, &use_stmt)) return false; /* If the use isn't in this block, it won't be replaced either. */ if (gimple_bb (use_stmt) != gimple_bb (stmt)) return false; locus1 = gimple_location (stmt); block1 = gimple_block (stmt); if (gimple_code (use_stmt) == GIMPLE_PHI) { locus2 = 0; block2 = NULL_TREE; } else { locus2 = gimple_location (use_stmt); block2 = gimple_block (use_stmt); } if (!optimize && ((locus1 && locus1 != locus2) || (block1 && block1 != block2))) return false; /* Used in this block, but at the TOP of the block, not the end. */ if (gimple_code (use_stmt) == GIMPLE_PHI) return false; /* There must be no VDEFs. */ if (!(ZERO_SSA_OPERANDS (stmt, SSA_OP_VDEF))) return false; /* Without alias info we can't move around loads. */ if (gimple_references_memory_p (stmt) && !optimize) return false; /* Float expressions must go through memory if float-store is on. */ if (flag_float_store && FLOAT_TYPE_P (gimple_expr_type (stmt))) return false; /* An assignment with a register variable on the RHS is not replaceable. */ if (gimple_assign_rhs_code (stmt) == VAR_DECL && DECL_HARD_REGISTER (gimple_assign_rhs1 (stmt))) return false; /* No function calls can be replaced. */ if (is_gimple_call (stmt)) return false; /* Leave any stmt with volatile operands alone as well. */ if (gimple_has_volatile_ops (stmt)) return false; return true; }
static bool forward_propagate_addr_into_variable_array_index (tree offset, tree def_rhs, gimple_stmt_iterator *use_stmt_gsi) { tree index, tunit; gimple offset_def, use_stmt = gsi_stmt (*use_stmt_gsi); tree tmp; tunit = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (def_rhs))); if (!host_integerp (tunit, 1)) return false; /* Get the offset's defining statement. */ offset_def = SSA_NAME_DEF_STMT (offset); /* Try to find an expression for a proper index. This is either a multiplication expression by the element size or just the ssa name we came along with, in case the element size is one. In that case, however, we do not allow multiplications because they can be computing an index into a higher-level dimension (PR 37861). */ if (integer_onep (tunit)) { if (is_gimple_assign (offset_def) && gimple_assign_rhs_code (offset_def) == MULT_EXPR) return false; index = offset; } else { /* The statement which defines OFFSET before type conversion must be a simple GIMPLE_ASSIGN. */ if (!is_gimple_assign (offset_def)) return false; /* The RHS of the statement which defines OFFSET must be a multiplication of an object by the size of the array elements. This implicitly verifies that the size of the array elements is constant. */ if (gimple_assign_rhs_code (offset_def) == MULT_EXPR && TREE_CODE (gimple_assign_rhs2 (offset_def)) == INTEGER_CST && tree_int_cst_equal (gimple_assign_rhs2 (offset_def), tunit)) { /* The first operand to the MULT_EXPR is the desired index. */ index = gimple_assign_rhs1 (offset_def); } /* If we have idx * tunit + CST * tunit re-associate that. */ else if ((gimple_assign_rhs_code (offset_def) == PLUS_EXPR || gimple_assign_rhs_code (offset_def) == MINUS_EXPR) && TREE_CODE (gimple_assign_rhs1 (offset_def)) == SSA_NAME && TREE_CODE (gimple_assign_rhs2 (offset_def)) == INTEGER_CST && (tmp = div_if_zero_remainder (EXACT_DIV_EXPR, gimple_assign_rhs2 (offset_def), tunit)) != NULL_TREE) { gimple offset_def2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (offset_def)); if (is_gimple_assign (offset_def2) && gimple_assign_rhs_code (offset_def2) == MULT_EXPR && TREE_CODE (gimple_assign_rhs2 (offset_def2)) == INTEGER_CST && tree_int_cst_equal (gimple_assign_rhs2 (offset_def2), tunit)) { index = fold_build2 (gimple_assign_rhs_code (offset_def), TREE_TYPE (offset), gimple_assign_rhs1 (offset_def2), tmp); } else return false; } else return false; } /* Replace the pointer addition with array indexing. */ index = force_gimple_operand_gsi (use_stmt_gsi, index, true, NULL_TREE, true, GSI_SAME_STMT); gimple_assign_set_rhs_from_tree (use_stmt_gsi, unshare_expr (def_rhs)); use_stmt = gsi_stmt (*use_stmt_gsi); TREE_OPERAND (TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0), 1) = index; /* That should have created gimple, so there is no need to record information to undo the propagation. */ fold_stmt_inplace (use_stmt); tidy_after_forward_propagate_addr (use_stmt); return true; }
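The re-association branch above additionally handles offsets of the form idx * tunit + CST * tunit. A standalone sketch of the arithmetic it relies on (plain C, invented names):

#include <assert.h>
#include <stdlib.h>

int
main (void)
{
  int a[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  int *p = &a[0];
  size_t i = 2;

  /* offset = i * sizeof (int) + 8, and 8 % sizeof (int) == 0, so the
     index can be rebuilt as i + 8 / sizeof (int) = i + 2 and the access
     rewritten as a[i + 2].  */
  int before = *(int *) ((char *) p + (i * sizeof (int) + 8));
  int after = a[i + 2];

  assert (before == after);
  return 0;
}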
static void output_gimple_stmt (struct output_block *ob, gimple stmt) { unsigned i; enum gimple_code code; enum LTO_tags tag; struct bitpack_d bp; histogram_value hist; /* Emit identifying tag. */ code = gimple_code (stmt); tag = lto_gimple_code_to_tag (code); streamer_write_record_start (ob, tag); /* Emit the tuple header. */ bp = bitpack_create (ob->main_stream); bp_pack_var_len_unsigned (&bp, gimple_num_ops (stmt)); bp_pack_value (&bp, gimple_no_warning_p (stmt), 1); if (is_gimple_assign (stmt)) bp_pack_value (&bp, gimple_assign_nontemporal_move_p (stmt), 1); bp_pack_value (&bp, gimple_has_volatile_ops (stmt), 1); hist = gimple_histogram_value (cfun, stmt); bp_pack_value (&bp, hist != NULL, 1); bp_pack_var_len_unsigned (&bp, stmt->gsbase.subcode); /* Emit location information for the statement. */ stream_output_location (ob, &bp, LOCATION_LOCUS (gimple_location (stmt))); streamer_write_bitpack (&bp); /* Emit the lexical block holding STMT. */ stream_write_tree (ob, gimple_block (stmt), true); /* Emit the operands. */ switch (gimple_code (stmt)) { case GIMPLE_RESX: streamer_write_hwi (ob, gimple_resx_region (stmt)); break; case GIMPLE_EH_MUST_NOT_THROW: stream_write_tree (ob, gimple_eh_must_not_throw_fndecl (stmt), true); break; case GIMPLE_EH_DISPATCH: streamer_write_hwi (ob, gimple_eh_dispatch_region (stmt)); break; case GIMPLE_ASM: streamer_write_uhwi (ob, gimple_asm_ninputs (stmt)); streamer_write_uhwi (ob, gimple_asm_noutputs (stmt)); streamer_write_uhwi (ob, gimple_asm_nclobbers (stmt)); streamer_write_uhwi (ob, gimple_asm_nlabels (stmt)); streamer_write_string (ob, ob->main_stream, gimple_asm_string (stmt), true); /* Fallthru */ case GIMPLE_ASSIGN: case GIMPLE_CALL: case GIMPLE_RETURN: case GIMPLE_SWITCH: case GIMPLE_LABEL: case GIMPLE_COND: case GIMPLE_GOTO: case GIMPLE_DEBUG: for (i = 0; i < gimple_num_ops (stmt); i++) { tree op = gimple_op (stmt, i); tree *basep = NULL; /* Wrap all uses of non-automatic variables inside MEM_REFs so that we do not have to deal with type mismatches on merged symbols during IL read in. The first operand of GIMPLE_DEBUG must be a decl, not MEM_REF, though. */ if (op && (i || !is_gimple_debug (stmt))) { basep = &op; while (handled_component_p (*basep)) basep = &TREE_OPERAND (*basep, 0); if (TREE_CODE (*basep) == VAR_DECL && !auto_var_in_fn_p (*basep, current_function_decl) && !DECL_REGISTER (*basep)) { bool volatilep = TREE_THIS_VOLATILE (*basep); *basep = build2 (MEM_REF, TREE_TYPE (*basep), build_fold_addr_expr (*basep), build_int_cst (build_pointer_type (TREE_TYPE (*basep)), 0)); TREE_THIS_VOLATILE (*basep) = volatilep; } else basep = NULL; } stream_write_tree (ob, op, true); /* Restore the original base if we wrapped it inside a MEM_REF. */ if (basep) *basep = TREE_OPERAND (TREE_OPERAND (*basep, 0), 0); } if (is_gimple_call (stmt)) { if (gimple_call_internal_p (stmt)) streamer_write_enum (ob->main_stream, internal_fn, IFN_LAST, gimple_call_internal_fn (stmt)); else stream_write_tree (ob, gimple_call_fntype (stmt), true); } break; case GIMPLE_NOP: case GIMPLE_PREDICT: break; case GIMPLE_TRANSACTION: gcc_assert (gimple_transaction_body (stmt) == NULL); stream_write_tree (ob, gimple_transaction_label (stmt), true); break; default: gcc_unreachable (); } if (hist) stream_out_histogram_value (ob, hist); }
void gimple_regimplify_operands (gimple stmt, gimple_stmt_iterator *gsi_p) { size_t i, num_ops; tree lhs; gimple_seq pre = NULL; gimple post_stmt = NULL; push_gimplify_context (gimple_in_ssa_p (cfun)); switch (gimple_code (stmt)) { case GIMPLE_COND: gimplify_expr (gimple_cond_lhs_ptr (stmt), &pre, NULL, is_gimple_val, fb_rvalue); gimplify_expr (gimple_cond_rhs_ptr (stmt), &pre, NULL, is_gimple_val, fb_rvalue); break; case GIMPLE_SWITCH: gimplify_expr (gimple_switch_index_ptr (stmt), &pre, NULL, is_gimple_val, fb_rvalue); break; case GIMPLE_OMP_ATOMIC_LOAD: gimplify_expr (gimple_omp_atomic_load_rhs_ptr (stmt), &pre, NULL, is_gimple_val, fb_rvalue); break; case GIMPLE_ASM: { size_t i, noutputs = gimple_asm_noutputs (stmt); const char *constraint, **oconstraints; bool allows_mem, allows_reg, is_inout; oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *)); for (i = 0; i < noutputs; i++) { tree op = gimple_asm_output_op (stmt, i); constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op))); oconstraints[i] = constraint; parse_output_constraint (&constraint, i, 0, 0, &allows_mem, &allows_reg, &is_inout); gimplify_expr (&TREE_VALUE (op), &pre, NULL, is_inout ? is_gimple_min_lval : is_gimple_lvalue, fb_lvalue | fb_mayfail); } for (i = 0; i < gimple_asm_ninputs (stmt); i++) { tree op = gimple_asm_input_op (stmt, i); constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op))); parse_input_constraint (&constraint, 0, 0, noutputs, 0, oconstraints, &allows_mem, &allows_reg); if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (op))) && allows_mem) allows_reg = 0; if (!allows_reg && allows_mem) gimplify_expr (&TREE_VALUE (op), &pre, NULL, is_gimple_lvalue, fb_lvalue | fb_mayfail); else gimplify_expr (&TREE_VALUE (op), &pre, NULL, is_gimple_asm_val, fb_rvalue); } } break; default: /* NOTE: We start gimplifying operands from last to first to make sure that side-effects on the RHS of calls, assignments and ASMs are executed before the LHS. The ordering is not important for other statements. */ num_ops = gimple_num_ops (stmt); for (i = num_ops; i > 0; i--) { tree op = gimple_op (stmt, i - 1); if (op == NULL_TREE) continue; if (i == 1 && (is_gimple_call (stmt) || is_gimple_assign (stmt))) gimplify_expr (&op, &pre, NULL, is_gimple_lvalue, fb_lvalue); else if (i == 2 && is_gimple_assign (stmt) && num_ops == 2 && get_gimple_rhs_class (gimple_expr_code (stmt)) == GIMPLE_SINGLE_RHS) gimplify_expr (&op, &pre, NULL, rhs_predicate_for (gimple_assign_lhs (stmt)), fb_rvalue); else if (i == 2 && is_gimple_call (stmt)) { if (TREE_CODE (op) == FUNCTION_DECL) continue; gimplify_expr (&op, &pre, NULL, is_gimple_call_addr, fb_rvalue); } else gimplify_expr (&op, &pre, NULL, is_gimple_val, fb_rvalue); gimple_set_op (stmt, i - 1, op); } lhs = gimple_get_lhs (stmt); /* If the LHS changed it in a way that requires a simple RHS, create temporary. 
*/ if (lhs && !is_gimple_reg (lhs)) { bool need_temp = false; if (is_gimple_assign (stmt) && num_ops == 2 && get_gimple_rhs_class (gimple_expr_code (stmt)) == GIMPLE_SINGLE_RHS) gimplify_expr (gimple_assign_rhs1_ptr (stmt), &pre, NULL, rhs_predicate_for (gimple_assign_lhs (stmt)), fb_rvalue); else if (is_gimple_reg (lhs)) { if (is_gimple_reg_type (TREE_TYPE (lhs))) { if (is_gimple_call (stmt)) { i = gimple_call_flags (stmt); if ((i & ECF_LOOPING_CONST_OR_PURE) || !(i & (ECF_CONST | ECF_PURE))) need_temp = true; } if (stmt_can_throw_internal (stmt)) need_temp = true; } } else { if (is_gimple_reg_type (TREE_TYPE (lhs))) need_temp = true; else if (TYPE_MODE (TREE_TYPE (lhs)) != BLKmode) { if (is_gimple_call (stmt)) { tree fndecl = gimple_call_fndecl (stmt); if (!aggregate_value_p (TREE_TYPE (lhs), fndecl) && !(fndecl && DECL_RESULT (fndecl) && DECL_BY_REFERENCE (DECL_RESULT (fndecl)))) need_temp = true; } else need_temp = true; } } if (need_temp) { tree temp = create_tmp_reg (TREE_TYPE (lhs), NULL); if (gimple_in_ssa_p (cfun)) temp = make_ssa_name (temp, NULL); gimple_set_lhs (stmt, temp); post_stmt = gimple_build_assign (lhs, temp); } } break; } if (!gimple_seq_empty_p (pre)) gsi_insert_seq_before (gsi_p, pre, GSI_SAME_STMT); if (post_stmt) gsi_insert_after (gsi_p, post_stmt, GSI_NEW_STMT); pop_gimplify_context (NULL); }
static void my_dump_gimple(gimple gnode, gimple_stmt_iterator *ptrgsi)
{
    int gcode;
    tree funcdecl;
    tree desc_node;
    tree ptr_desc_node;
    tree t;
    tree tmp_var;
    tree const_char_restrict_ptr_type_node;
    gimple tmp_gstmt;
    gimple new_gnode;
    const char *hellocstr = "Hello, GCC!\n";
    expanded_location xloc;

    /*
     * Extract the gimple code from a gimple node.
     */
    gcode = gimple_code(gnode);

    /*
     * Get the line number of the corresponding
     * source code from a gimple node.
     */
    if (gimple_has_location(gnode)) {
        xloc = expand_location(gimple_location(gnode));
        printf("line %d:", xloc.line);
    }
    printf("\t\t\t\t%s\n", gimple_code_name[gcode]);

    switch (gcode) {
    case GIMPLE_ASSIGN:
        /*
         * Add a printf("Hello, GCC!\n"); statement
         * after the first assignment we see.
         * If yes equals 1, we have already added
         * the statement and must not add it again.
         */
        if (!yes) {
            /*
             * Since printf is a builtin function, we need
             * to get its declaration from built_in_decls[].
             * The index number can be found in the GCC
             * source file gcc/builtins.def.
             */
            funcdecl = built_in_decls[BUILT_IN_PRINTF];
            if (funcdecl == NULL_TREE) {
                printf("cannot find printf\n");
            } else {
                /*
                 * In gimple, every statement is simplified into
                 * three-operand form, so our printf() statement
                 * is changed into the following two gimple
                 * statements:
                 *
                 *   <D.XXX> = (const char * restrict) &"Hello, GCC!\n"[0]
                 *   printf(<D.XXX>);
                 *
                 * Note that <D.XXX> is a temporary variable; we can
                 * actually use any name we like as long as it does
                 * not conflict.
                 */

                /*
                 * Generate a STRING_CST whose value is "Hello, GCC!\n".
                 */
                desc_node = build_string(strlen(hellocstr), hellocstr);

                /*
                 * Two points to notice here:
                 * 1. A STRING_CST built by build_string() does
                 *    not have TREE_TYPE set, so we need to
                 *    set it manually.
                 * 2. build_string() adds a trailing '\0'
                 *    when building the STRING_CST, so we do
                 *    not need to care about it.
                 */
                TREE_TYPE(desc_node) = build_array_type(
                    char_type_node,
                    build_index_type(
                        build_int_cst(NULL_TREE, strlen(hellocstr))));

                /*
                 * Define a const char * restrict type node
                 * here for the conversion.
                 * I'm not sure why we need to add a restrict
                 * qualifier, but GCC really does so when it
                 * converts a STRING_CST from AST to gimple.
                 */
                const_char_restrict_ptr_type_node = build_qualified_type(
                    build_pointer_type(
                        build_qualified_type(
                            char_type_node,
                            TYPE_QUAL_CONST)),
                    TYPE_QUAL_RESTRICT);

                /*
                 * In the AST, a use of a STRING_CST has the form
                 * <ADDR_EXPR<STRING_CST>>, but in gimple it becomes
                 * <ADDR_EXPR<ARRAY_REF<STRING_CST>>>,
                 * so we need to do a conversion here.
                 */

                /*
                 * First wrap the STRING_CST with an ARRAY_REF.
                 */
                t = build4(ARRAY_REF, char_type_node, desc_node,
                           build_int_cst(NULL_TREE, 0),
                           NULL_TREE, NULL_TREE);

                /*
                 * Second wrap the ARRAY_REF with an ADDR_EXPR.
                 */
                ptr_desc_node = build1(ADDR_EXPR,
                                       const_char_restrict_ptr_type_node, t);

                /*
                 * I'm not sure why we need to use fold_convert()
                 * here, but if we do not, the compilation fails.
                 */
                ptr_desc_node = fold_convert(
                    const_char_restrict_ptr_type_node, ptr_desc_node);

                /*
                 * If is_gimple_min_invariant(ptr_desc_node)
                 * is true, we have built a correct argument;
                 * otherwise the argument is not suitable for
                 * a gimple call.
                 */
                if (!is_gimple_min_invariant(ptr_desc_node)) {
                    printf("Something wrong with is_gimple_min_invariant\n");
                    return;
                }

                /*
                 * This creates a temporary variable.
                 */
                tmp_var = make_rename_temp(
                    const_char_restrict_ptr_type_node, "plugin_var");

                /*
                 * Build the gimple statement. Still remember it?
                 *   <D.XXX> = (const char * restrict) "Hello, GCC!\n"
                 */
                tmp_gstmt = gimple_build_assign(tmp_var, ptr_desc_node);

                /*
                 * Check whether the gimple statement is correct.
                 */
                if (!is_gimple_assign(tmp_gstmt)) {
                    printf("tmp_gstmt is invalid\n");
                }

                printf("Insert gimple statement:");
                print_gimple_stmt(stdout, tmp_gstmt, 0,
                                  TDF_DETAILS | TDF_VERBOSE | TDF_TREE);

                /*
                 * Insert the gimple statement into the basic block.
                 */
                gsi_insert_after(ptrgsi, tmp_gstmt, GSI_NEW_STMT);

                if (is_gimple_operand(tmp_var)) {
                    printf("begin to insert printf\n");
                    yes = 1;
                    printf("Insert gimple statement:");

                    /*
                     * Insert the gimple statement calling printf
                     * into the basic block.
                     */
                    new_gnode = gimple_build_call(funcdecl, 1, tmp_var);
                    print_gimple_stmt(stdout, new_gnode, 0, 0);
                    gsi_insert_after(ptrgsi, new_gnode, GSI_NEW_STMT);
                } else {
                    print_generic_stmt(stdout, ptr_desc_node,
                                       TDF_DETAILS | TDF_VERBOSE | TDF_TREE);
                    printf("Not Gimple Operands\n");
                }

                /*
                 * Since we have more than one consecutive statement
                 * to insert, we could instead build a gimple
                 * sequence, add all the statements to the sequence,
                 * and then insert the sequence into the basic block.
                 * That seems to be the better method; see the sketch
                 * after this function.
                 */
            }
        }
        break;
    default:
        break;
    }
}
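/* A minimal sketch of the gimple_seq variant suggested in the last
   comment of my_dump_gimple() above, not part of the original plugin:
   queue the statements first, then do a single insertion.  The two
   statements are assumed to have been built exactly as in
   my_dump_gimple(); gimple_seq_add_stmt() and gsi_insert_seq_after()
   are existing GCC API.  */
static void insert_hello_seq(gimple_stmt_iterator *ptrgsi,
                             gimple tmp_gstmt, gimple new_gnode)
{
    gimple_seq seq = NULL;

    /*
     * gimple_seq_add_stmt() allocates the sequence on first use and
     * links each statement at its end, so source order is preserved.
     */
    gimple_seq_add_stmt(&seq, tmp_gstmt);
    gimple_seq_add_stmt(&seq, new_gnode);

    /*
     * A single insertion updates the iterator and the basic block
     * once, instead of once per statement.
     */
    gsi_insert_seq_after(ptrgsi, seq, GSI_NEW_STMT);
}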
static gimple
vect_recog_widen_mult_pattern (gimple last_stmt,
                               tree *type_in,
                               tree *type_out)
{
  gimple def_stmt0, def_stmt1;
  tree oprnd0, oprnd1;
  tree type, half_type0, half_type1;
  gimple pattern_stmt;
  tree vectype, vectype_out;
  tree dummy;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  VEC (tree, heap) *dummy_vec;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */
  if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* Check argument 0.  */
  if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0))
    return NULL;
  oprnd0 = gimple_assign_rhs1 (def_stmt0);

  /* Check argument 1.  */
  if (!widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1))
    return NULL;
  oprnd1 = gimple_assign_rhs1 (def_stmt1);

  if (!types_compatible_p (half_type0, half_type1))
    return NULL;

  /* Pattern detected.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");

  /* Check target support.  */
  vectype = get_vectype_for_scalar_type (half_type0);
  vectype_out = get_vectype_for_scalar_type (type);
  if (!vectype
      || !vectype_out
      || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt,
                                          vectype_out, vectype,
                                          &dummy, &dummy, &dummy_code,
                                          &dummy_code, &dummy_int,
                                          &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = vectype_out;

  /* Pattern supported.  Create a stmt to be used to replace the
     pattern.  */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var,
                                               oprnd0, oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);

  return pattern_stmt;
}
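/* For reference, the source-level shape the recognizer above matches,
   sketched here as plain C rather than taken from the GCC testsuite:
   both multiplicands are widened from the same narrow type, so the
   MULT_EXPR on the widened values can be replaced by one
   WIDEN_MULT_EXPR on the narrow ones.  The function name is
   illustrative only.  */

int
widen_mult (short a, short b)
{
  int a_w = (int) a;  /* widened_name_p () matches this def for oprnd0.  */
  int b_w = (int) b;  /* ... and this def for oprnd1.  */
  return a_w * b_w;   /* This MULT_EXPR becomes WIDEN_MULT_EXPR (a, b).  */
}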