static bool forward_propagate_addr_into_variable_array_index (tree offset, tree def_rhs, gimple_stmt_iterator *use_stmt_gsi) { tree index; gimple offset_def, use_stmt = gsi_stmt (*use_stmt_gsi); /* Get the offset's defining statement. */ offset_def = SSA_NAME_DEF_STMT (offset); /* Try to find an expression for a proper index. This is either a multiplication expression by the element size or just the ssa name we came along in case the element size is one. In that case, however, we do not allow multiplications because they can be computing index to a higher level dimension (PR 37861). */ if (integer_onep (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (def_rhs))))) { if (is_gimple_assign (offset_def) && gimple_assign_rhs_code (offset_def) == MULT_EXPR) return false; index = offset; } else { /* The statement which defines OFFSET before type conversion must be a simple GIMPLE_ASSIGN. */ if (!is_gimple_assign (offset_def)) return false; /* The RHS of the statement which defines OFFSET must be a multiplication of an object by the size of the array elements. This implicitly verifies that the size of the array elements is constant. */ offset = gimple_assign_rhs1 (offset_def); if (gimple_assign_rhs_code (offset_def) != MULT_EXPR || TREE_CODE (gimple_assign_rhs2 (offset_def)) != INTEGER_CST || !simple_cst_equal (gimple_assign_rhs2 (offset_def), TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (def_rhs))))) return false; /* The first operand to the MULT_EXPR is the desired index. */ index = offset; } /* Replace the pointer addition with array indexing. */ gimple_assign_set_rhs_from_tree (use_stmt_gsi, unshare_expr (def_rhs)); use_stmt = gsi_stmt (*use_stmt_gsi); TREE_OPERAND (TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0), 1) = index; /* That should have created gimple, so there is no need to record information to undo the propagation. */ fold_stmt_inplace (use_stmt); tidy_after_forward_propagate_addr (use_stmt); return true; }
/* Check whether G is a potential conditional compare candidate. */ static bool ccmp_candidate_p (gimple *g) { tree rhs = gimple_assign_rhs_to_tree (g); tree lhs, op0, op1; gimple *gs0, *gs1; enum tree_code tcode, tcode0, tcode1; tcode = TREE_CODE (rhs); if (tcode != BIT_AND_EXPR && tcode != BIT_IOR_EXPR) return false; lhs = gimple_assign_lhs (g); op0 = TREE_OPERAND (rhs, 0); op1 = TREE_OPERAND (rhs, 1); if ((TREE_CODE (op0) != SSA_NAME) || (TREE_CODE (op1) != SSA_NAME) || !has_single_use (lhs)) return false; gs0 = get_gimple_for_ssa_name (op0); gs1 = get_gimple_for_ssa_name (op1); if (!gs0 || !gs1 || !is_gimple_assign (gs0) || !is_gimple_assign (gs1) /* g, gs0 and gs1 must be in the same basic block, since the current stage is out-of-ssa. We cannot guarantee correctness when forwarding gs0 and gs1 into g without dataflow analysis. */ || gimple_bb (gs0) != gimple_bb (gs1) || gimple_bb (gs0) != gimple_bb (g)) return false; if (!(INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs0))) || POINTER_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs0)))) || !(INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs1))) || POINTER_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs1))))) return false; tcode0 = gimple_assign_rhs_code (gs0); tcode1 = gimple_assign_rhs_code (gs1); if (TREE_CODE_CLASS (tcode0) == tcc_comparison && TREE_CODE_CLASS (tcode1) == tcc_comparison) return true; if (TREE_CODE_CLASS (tcode0) == tcc_comparison && ccmp_candidate_p (gs1)) return true; else if (TREE_CODE_CLASS (tcode1) == tcc_comparison && ccmp_candidate_p (gs0)) return true; /* We skip ccmp_candidate_p (gs1) && ccmp_candidate_p (gs0) since there is no way to set the CC flag. */ return false; }
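As an illustration (not part of the pass itself), the shape of source code this predicate is meant to accept is a boolean AND/OR of two single-use comparisons whose operands are integers or pointers and whose defining statements sit in the same basic block; on targets that provide the gen_ccmp_first/gen_ccmp_next hooks (for example AArch64) such a condition can be expanded as a compare followed by conditional compares. The function and variable names below are invented for the example.

/* Hypothetical example: with simple, side-effect-free operands the
   gimplifier typically lowers the && into _1 = x >= lo; _2 = x <= hi;
   _3 = _1 & _2, which is exactly the BIT_AND_EXPR of two comparisons
   that ccmp_candidate_p looks for.  */
int
in_range (int x, int lo, int hi)
{
  return x >= lo && x <= hi;
}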
void backprop::optimize_assign (gassign *assign, tree lhs, const usage_info *info) { switch (gimple_assign_rhs_code (assign)) { case MULT_EXPR: case RDIV_EXPR: /* If the sign of the result doesn't matter, strip sign operations from both inputs. */ if (info->flags.ignore_sign) replace_assign_rhs (assign, lhs, strip_sign_op (gimple_assign_rhs1 (assign)), strip_sign_op (gimple_assign_rhs2 (assign)), NULL_TREE); break; case COND_EXPR: /* If the sign of A ? B : C doesn't matter, strip sign operations from both B and C. */ if (info->flags.ignore_sign) replace_assign_rhs (assign, lhs, NULL_TREE, strip_sign_op (gimple_assign_rhs2 (assign)), strip_sign_op (gimple_assign_rhs3 (assign))); break; default: break; } }
static tree strip_sign_op_1 (tree rhs) { if (TREE_CODE (rhs) != SSA_NAME) return NULL_TREE; gimple *def_stmt = SSA_NAME_DEF_STMT (rhs); if (gassign *assign = dyn_cast <gassign *> (def_stmt)) switch (gimple_assign_rhs_code (assign)) { case ABS_EXPR: case NEGATE_EXPR: return gimple_assign_rhs1 (assign); default: break; } else if (gcall *call = dyn_cast <gcall *> (def_stmt)) switch (gimple_call_combined_fn (call)) { CASE_CFN_COPYSIGN: return gimple_call_arg (call, 0); default: break; } return NULL_TREE; }
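A sketch of what the two helpers above enable, under the assumption (established elsewhere in this pass) that a value whose only use is fabs is marked as sign-insensitive: the NEGATE_EXPR and ABS_EXPR feeding the multiplication can then be stripped, so the call below effectively computes fabs (x * y). Names are illustrative only.

#include <math.h>

/* The sign of the product is discarded by fabs, so strip_sign_op can
   peel the negation from -x and the fabs from fabs (y) on the
   multiplication's operands.  */
double
sign_insensitive (double x, double y)
{
  return fabs (-x * fabs (y));
}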
static bool stmt_may_generate_copy (gimple stmt) { if (gimple_code (stmt) == GIMPLE_PHI) return !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)); if (gimple_code (stmt) != GIMPLE_ASSIGN) return false; /* If the statement has volatile operands, it won't generate a useful copy. */ if (gimple_has_volatile_ops (stmt)) return false; /* Statements with loads and/or stores will never generate a useful copy. */ if (gimple_vuse (stmt)) return false; /* Otherwise, the only statements that generate useful copies are assignments whose RHS is just an SSA name that doesn't flow through abnormal edges. */ return ((gimple_assign_rhs_code (stmt) == SSA_NAME && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_assign_rhs1 (stmt))) || is_gimple_min_invariant (gimple_assign_rhs1 (stmt))); }
/* Return whether USE_STMT is a floating-point division by DEF. */ static inline bool is_division_by (gimple use_stmt, tree def) { return is_gimple_assign (use_stmt) && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR && gimple_assign_rhs2 (use_stmt) == def /* Do not recognize x / x as valid division, as we are getting confused later by replacing all immediate uses x in such a stmt. */ && gimple_assign_rhs1 (use_stmt) != def; }
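For context, this predicate belongs to the reciprocal-CSE transformation: when several statements divide by the same SSA name and unsafe math optimizations are enabled, the divisions are rewritten as multiplications by a single computed reciprocal. A minimal, illustrative input:

/* With -O2 -ffast-math (or -funsafe-math-optimizations) the two
   divisions by d become t = 1.0 / d; a * t + b * t.  Both division
   statements satisfy is_division_by (stmt, d).  */
double
two_divisions (double a, double b, double d)
{
  return a / d + b / d;
}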
static bool can_propagate_from (gimple def_stmt) { use_operand_p use_p; ssa_op_iter iter; gcc_assert (is_gimple_assign (def_stmt)); /* If the rhs has side-effects we cannot propagate from it. */ if (gimple_has_volatile_ops (def_stmt)) return false; /* If the rhs is a load we cannot propagate from it. */ if (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) == tcc_reference || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) == tcc_declaration) return false; /* Constants can always be propagated. */ if (gimple_assign_single_p (def_stmt) && is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt))) return true; /* We cannot propagate ssa names that occur in abnormal phi nodes. */ FOR_EACH_SSA_USE_OPERAND (use_p, def_stmt, iter, SSA_OP_USE) if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (USE_FROM_PTR (use_p))) return false; /* If the definition is a conversion of a pointer to a function type, then we cannot apply optimizations as some targets require function pointers to be canonicalized and in this case this optimization could eliminate a necessary canonicalization. */ if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))) { tree rhs = gimple_assign_rhs1 (def_stmt); if (POINTER_TYPE_P (TREE_TYPE (rhs)) && TREE_CODE (TREE_TYPE (TREE_TYPE (rhs))) == FUNCTION_TYPE) return false; } return true; }
static tree rhs_to_tree (tree type, gimple stmt) { enum tree_code code = gimple_assign_rhs_code (stmt); if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS) return fold_build2 (code, type, gimple_assign_rhs1 (stmt), gimple_assign_rhs2 (stmt)); else if (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS) return build1 (code, type, gimple_assign_rhs1 (stmt)); else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS) return gimple_assign_rhs1 (stmt); else gcc_unreachable (); }
static void instrument_si_overflow (gimple_stmt_iterator gsi) { gimple stmt = gsi_stmt (gsi); tree_code code = gimple_assign_rhs_code (stmt); tree lhs = gimple_assign_lhs (stmt); tree lhstype = TREE_TYPE (lhs); tree a, b; gimple g; /* If this is not a signed operation, don't instrument anything here. Also punt on bit-fields. */ if (!INTEGRAL_TYPE_P (lhstype) || TYPE_OVERFLOW_WRAPS (lhstype) || GET_MODE_BITSIZE (TYPE_MODE (lhstype)) != TYPE_PRECISION (lhstype)) return; switch (code) { case MINUS_EXPR: case PLUS_EXPR: case MULT_EXPR: /* Transform i = u {+,-,*} 5; into i = UBSAN_CHECK_{ADD,SUB,MUL} (u, 5); */ a = gimple_assign_rhs1 (stmt); b = gimple_assign_rhs2 (stmt); g = gimple_build_call_internal (code == PLUS_EXPR ? IFN_UBSAN_CHECK_ADD : code == MINUS_EXPR ? IFN_UBSAN_CHECK_SUB : IFN_UBSAN_CHECK_MUL, 2, a, b); gimple_call_set_lhs (g, lhs); gsi_replace (&gsi, g, false); break; case NEGATE_EXPR: /* Represent i = -u; as i = UBSAN_CHECK_SUB (0, u); */ a = build_int_cst (lhstype, 0); b = gimple_assign_rhs1 (stmt); g = gimple_build_call_internal (IFN_UBSAN_CHECK_SUB, 2, a, b); gimple_call_set_lhs (g, lhs); gsi_replace (&gsi, g, false); break; default: break; } }
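The source-level trigger is ordinary signed arithmetic compiled with signed-overflow sanitization; a small, illustrative example (names invented) of code whose statements are rewritten into the internal UBSAN_CHECK_* calls described in the comments above:

/* Compile with: gcc -O2 -fsanitize=signed-integer-overflow example.c
   Each signed +, * and unary - below is replaced by the matching
   UBSAN_CHECK_ADD / UBSAN_CHECK_MUL / UBSAN_CHECK_SUB (0, x) call.  */
int
checked_arith (int u, int v)
{
  int a = u + 5;
  int b = a * v;
  return -b;
}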
/* PREV is the CC flag from previous compares. The function expands the next compare based on G, which is combined with the previous compare by CODE. PREP_SEQ returns all insns to prepare operands for the compare. GEN_SEQ returns all compare insns. */ static rtx expand_ccmp_next (gimple *g, enum tree_code code, rtx prev, rtx *prep_seq, rtx *gen_seq) { enum rtx_code rcode; int unsignedp = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (g))); gcc_assert (code == BIT_AND_EXPR || code == BIT_IOR_EXPR); rcode = get_rtx_code (gimple_assign_rhs_code (g), unsignedp); return targetm.gen_ccmp_next (prep_seq, gen_seq, prev, rcode, gimple_assign_rhs1 (g), gimple_assign_rhs2 (g), get_rtx_code (code, 0)); }
/* Transform 1) Memory references. */ static void mf_xform_statements (void) { basic_block bb, next; gimple_stmt_iterator i; int saved_last_basic_block = last_basic_block; enum gimple_rhs_class grhs_class; bb = ENTRY_BLOCK_PTR ->next_bb; do { next = bb->next_bb; for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i)) { gimple s = gsi_stmt (i); /* Only a few GIMPLE statements can reference memory. */ switch (gimple_code (s)) { case GIMPLE_ASSIGN: mf_xform_derefs_1 (&i, gimple_assign_lhs_ptr (s), gimple_location (s), integer_one_node); mf_xform_derefs_1 (&i, gimple_assign_rhs1_ptr (s), gimple_location (s), integer_zero_node); grhs_class = get_gimple_rhs_class (gimple_assign_rhs_code (s)); if (grhs_class == GIMPLE_BINARY_RHS) mf_xform_derefs_1 (&i, gimple_assign_rhs2_ptr (s), gimple_location (s), integer_zero_node); break; case GIMPLE_RETURN: if (gimple_return_retval (s) != NULL_TREE) { mf_xform_derefs_1 (&i, gimple_return_retval_ptr (s), gimple_location (s), integer_zero_node); } break; default: ; } } bb = next; } while (bb && bb->index <= saved_last_basic_block); }
static bool widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt, bool check_sign) { tree dummy; gimple dummy_gimple; loop_vec_info loop_vinfo; stmt_vec_info stmt_vinfo; tree type = TREE_TYPE (name); tree oprnd0; enum vect_def_type dt; tree def; stmt_vinfo = vinfo_for_stmt (use_stmt); loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); if (!vect_is_simple_use (name, loop_vinfo, NULL, def_stmt, &def, &dt)) return false; if (dt != vect_internal_def && dt != vect_external_def && dt != vect_constant_def) return false; if (! *def_stmt) return false; if (!is_gimple_assign (*def_stmt)) return false; if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR) return false; oprnd0 = gimple_assign_rhs1 (*def_stmt); *half_type = TREE_TYPE (oprnd0); if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type) || ((TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type)) && check_sign) || (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2))) return false; if (!vect_is_simple_use (oprnd0, loop_vinfo, NULL, &dummy_gimple, &dummy, &dt)) return false; return true; }
static gimple get_prop_source_stmt (tree name, bool single_use_only, bool *single_use_p) { bool single_use = true; do { gimple def_stmt = SSA_NAME_DEF_STMT (name); if (!has_single_use (name)) { single_use = false; if (single_use_only) return NULL; } /* If name is defined by a PHI node or is the default def, bail out. */ if (gimple_code (def_stmt) != GIMPLE_ASSIGN) return NULL; /* If name is not a simple copy destination, we found it. */ if (!gimple_assign_copy_p (def_stmt) || TREE_CODE (gimple_assign_rhs1 (def_stmt)) != SSA_NAME) { tree rhs; if (!single_use_only && single_use_p) *single_use_p = single_use; /* We can look through pointer conversions in the search for a useful stmt for the comparison folding. */ rhs = gimple_assign_rhs1 (def_stmt); if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)) && TREE_CODE (rhs) == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (gimple_assign_lhs (def_stmt))) && POINTER_TYPE_P (TREE_TYPE (rhs))) name = rhs; else return def_stmt; } else { /* Continue searching the def of the copy source name. */ name = gimple_assign_rhs1 (def_stmt); } } while (1); }
static bool recognize_bits_test (gimple cond, tree *name, tree *bits) { gimple stmt; /* Get at the definition of the result of the bit test. */ if (gimple_cond_code (cond) != NE_EXPR || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME || !integer_zerop (gimple_cond_rhs (cond))) return false; stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond)); if (!is_gimple_assign (stmt) || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR) return false; *name = get_name_for_bit_test (gimple_assign_rhs1 (stmt)); *bits = gimple_assign_rhs2 (stmt); return true; }
static tree get_name_for_bit_test (tree candidate) { /* Skip single-use names in favor of using the name from a non-widening conversion definition. */ if (TREE_CODE (candidate) == SSA_NAME && has_single_use (candidate)) { gimple def_stmt = SSA_NAME_DEF_STMT (candidate); if (is_gimple_assign (def_stmt) && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))) { if (TYPE_PRECISION (TREE_TYPE (candidate)) <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))) return gimple_assign_rhs1 (def_stmt); } } return candidate; }
static enum ssa_prop_result copy_prop_visit_assignment (gimple stmt, tree *result_p) { tree lhs, rhs; prop_value_t *rhs_val; lhs = gimple_assign_lhs (stmt); rhs = gimple_assign_rhs1 (stmt); gcc_assert (gimple_assign_rhs_code (stmt) == SSA_NAME); rhs_val = get_copy_of_val (rhs); if (TREE_CODE (lhs) == SSA_NAME) { /* Straight copy between two SSA names. First, make sure that we can propagate the RHS into uses of LHS. */ if (!may_propagate_copy (lhs, rhs)) return SSA_PROP_VARYING; /* Notice that in the case of assignments, we make the LHS be a copy of RHS's value, not of RHS itself. This avoids keeping unnecessary copy-of chains (assignments cannot be in a cycle like PHI nodes), speeding up the propagation process. This is different from what we do in copy_prop_visit_phi_node. In those cases, we are interested in the copy-of chains. */ *result_p = lhs; if (set_copy_of_val (*result_p, rhs_val->value)) return SSA_PROP_INTERESTING; else return SSA_PROP_NOT_INTERESTING; } return SSA_PROP_VARYING; }
bool gimple_simplify (gimple stmt, code_helper *rcode, tree *ops, gimple_seq *seq, tree (*valueize)(tree)) { switch (gimple_code (stmt)) { case GIMPLE_ASSIGN: { enum tree_code code = gimple_assign_rhs_code (stmt); tree type = TREE_TYPE (gimple_assign_lhs (stmt)); switch (gimple_assign_rhs_class (stmt)) { case GIMPLE_SINGLE_RHS: if (code == REALPART_EXPR || code == IMAGPART_EXPR || code == VIEW_CONVERT_EXPR) { tree op0 = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0); if (valueize && TREE_CODE (op0) == SSA_NAME) { tree tem = valueize (op0); if (tem) op0 = tem; } *rcode = code; ops[0] = op0; return gimple_resimplify1 (seq, rcode, type, ops, valueize); } else if (code == BIT_FIELD_REF) { tree rhs1 = gimple_assign_rhs1 (stmt); tree op0 = TREE_OPERAND (rhs1, 0); if (valueize && TREE_CODE (op0) == SSA_NAME) { tree tem = valueize (op0); if (tem) op0 = tem; } *rcode = code; ops[0] = op0; ops[1] = TREE_OPERAND (rhs1, 1); ops[2] = TREE_OPERAND (rhs1, 2); return gimple_resimplify3 (seq, rcode, type, ops, valueize); } else if (code == SSA_NAME && valueize) { tree op0 = gimple_assign_rhs1 (stmt); tree valueized = valueize (op0); if (!valueized || op0 == valueized) return false; ops[0] = valueized; *rcode = TREE_CODE (op0); return true; } break; case GIMPLE_UNARY_RHS: { tree rhs1 = gimple_assign_rhs1 (stmt); if (valueize && TREE_CODE (rhs1) == SSA_NAME) { tree tem = valueize (rhs1); if (tem) rhs1 = tem; } *rcode = code; ops[0] = rhs1; return gimple_resimplify1 (seq, rcode, type, ops, valueize); } case GIMPLE_BINARY_RHS: { tree rhs1 = gimple_assign_rhs1 (stmt); if (valueize && TREE_CODE (rhs1) == SSA_NAME) { tree tem = valueize (rhs1); if (tem) rhs1 = tem; } tree rhs2 = gimple_assign_rhs2 (stmt); if (valueize && TREE_CODE (rhs2) == SSA_NAME) { tree tem = valueize (rhs2); if (tem) rhs2 = tem; } *rcode = code; ops[0] = rhs1; ops[1] = rhs2; return gimple_resimplify2 (seq, rcode, type, ops, valueize); } case GIMPLE_TERNARY_RHS: { tree rhs1 = gimple_assign_rhs1 (stmt); if (valueize && TREE_CODE (rhs1) == SSA_NAME) { tree tem = valueize (rhs1); if (tem) rhs1 = tem; } tree rhs2 = gimple_assign_rhs2 (stmt); if (valueize && TREE_CODE (rhs2) == SSA_NAME) { tree tem = valueize (rhs2); if (tem) rhs2 = tem; } tree rhs3 = gimple_assign_rhs3 (stmt); if (valueize && TREE_CODE (rhs3) == SSA_NAME) { tree tem = valueize (rhs3); if (tem) rhs3 = tem; } *rcode = code; ops[0] = rhs1; ops[1] = rhs2; ops[2] = rhs3; return gimple_resimplify3 (seq, rcode, type, ops, valueize); } default: gcc_unreachable (); } break; } case GIMPLE_CALL: /* ??? This way we can't simplify calls with side-effects. */ if (gimple_call_lhs (stmt) != NULL_TREE) { tree fn = gimple_call_fn (stmt); /* ??? Internal function support missing. 
*/ if (!fn) return false; if (valueize && TREE_CODE (fn) == SSA_NAME) { tree tem = valueize (fn); if (tem) fn = tem; } if (!fn || TREE_CODE (fn) != ADDR_EXPR || TREE_CODE (TREE_OPERAND (fn, 0)) != FUNCTION_DECL || DECL_BUILT_IN_CLASS (TREE_OPERAND (fn, 0)) != BUILT_IN_NORMAL || !builtin_decl_implicit (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))) || !gimple_builtin_call_types_compatible_p (stmt, TREE_OPERAND (fn, 0))) return false; tree decl = TREE_OPERAND (fn, 0); tree type = TREE_TYPE (gimple_call_lhs (stmt)); switch (gimple_call_num_args (stmt)) { case 1: { tree arg1 = gimple_call_arg (stmt, 0); if (valueize && TREE_CODE (arg1) == SSA_NAME) { tree tem = valueize (arg1); if (tem) arg1 = tem; } *rcode = DECL_FUNCTION_CODE (decl); ops[0] = arg1; return gimple_resimplify1 (seq, rcode, type, ops, valueize); } case 2: { tree arg1 = gimple_call_arg (stmt, 0); if (valueize && TREE_CODE (arg1) == SSA_NAME) { tree tem = valueize (arg1); if (tem) arg1 = tem; } tree arg2 = gimple_call_arg (stmt, 1); if (valueize && TREE_CODE (arg2) == SSA_NAME) { tree tem = valueize (arg2); if (tem) arg2 = tem; } *rcode = DECL_FUNCTION_CODE (decl); ops[0] = arg1; ops[1] = arg2; return gimple_resimplify2 (seq, rcode, type, ops, valueize); } case 3: { tree arg1 = gimple_call_arg (stmt, 0); if (valueize && TREE_CODE (arg1) == SSA_NAME) { tree tem = valueize (arg1); if (tem) arg1 = tem; } tree arg2 = gimple_call_arg (stmt, 1); if (valueize && TREE_CODE (arg2) == SSA_NAME) { tree tem = valueize (arg2); if (tem) arg2 = tem; } tree arg3 = gimple_call_arg (stmt, 2); if (valueize && TREE_CODE (arg3) == SSA_NAME) { tree tem = valueize (arg3); if (tem) arg3 = tem; } *rcode = DECL_FUNCTION_CODE (decl); ops[0] = arg1; ops[1] = arg2; ops[2] = arg3; return gimple_resimplify3 (seq, rcode, type, ops, valueize); } default: return false; } } break; case GIMPLE_COND: { tree lhs = gimple_cond_lhs (stmt); if (valueize && TREE_CODE (lhs) == SSA_NAME) { tree tem = valueize (lhs); if (tem) lhs = tem; } tree rhs = gimple_cond_rhs (stmt); if (valueize && TREE_CODE (rhs) == SSA_NAME) { tree tem = valueize (rhs); if (tem) rhs = tem; } *rcode = gimple_cond_code (stmt); ops[0] = lhs; ops[1] = rhs; return gimple_resimplify2 (seq, rcode, boolean_type_node, ops, valueize); } default: break; } return false; }
static bool recognize_single_bit_test (gimple cond, tree *name, tree *bit) { gimple stmt; /* Get at the definition of the result of the bit test. */ if (gimple_cond_code (cond) != NE_EXPR || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME || !integer_zerop (gimple_cond_rhs (cond))) return false; stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond)); if (!is_gimple_assign (stmt)) return false; /* Look at which bit is tested. One form to recognize is D.1985_5 = state_3(D) >> control1_4(D); D.1986_6 = (int) D.1985_5; D.1987_7 = op0 & 1; if (D.1987_7 != 0) */ if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR && integer_onep (gimple_assign_rhs2 (stmt)) && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME) { tree orig_name = gimple_assign_rhs1 (stmt); /* Look through copies and conversions to eventually find the stmt that computes the shift. */ stmt = SSA_NAME_DEF_STMT (orig_name); while (is_gimple_assign (stmt) && ((CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt)) && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (stmt))) <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt))))) || gimple_assign_ssa_name_copy_p (stmt))) stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); /* If we found such, decompose it. */ if (is_gimple_assign (stmt) && gimple_assign_rhs_code (stmt) == RSHIFT_EXPR) { /* op0 & (1 << op1) */ *bit = gimple_assign_rhs2 (stmt); *name = gimple_assign_rhs1 (stmt); } else { /* t & 1 */ *bit = integer_zero_node; *name = get_name_for_bit_test (orig_name); } return true; } /* Another form is D.1987_7 = op0 & (1 << CST) if (D.1987_7 != 0) */ if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME && integer_pow2p (gimple_assign_rhs2 (stmt))) { *name = gimple_assign_rhs1 (stmt); *bit = build_int_cst (integer_type_node, tree_log2 (gimple_assign_rhs2 (stmt))); return true; } /* Another form is D.1986_6 = 1 << control1_4(D) D.1987_7 = op0 & D.1986_6 if (D.1987_7 != 0) */ if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME) { gimple tmp; /* Both arguments of the BIT_AND_EXPR can be the single-bit specifying expression. */ tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); if (is_gimple_assign (tmp) && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR && integer_onep (gimple_assign_rhs1 (tmp))) { *name = gimple_assign_rhs2 (stmt); *bit = gimple_assign_rhs2 (tmp); return true; } tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt)); if (is_gimple_assign (tmp) && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR && integer_onep (gimple_assign_rhs1 (tmp))) { *name = gimple_assign_rhs1 (stmt); *bit = gimple_assign_rhs2 (tmp); return true; } } return false; }
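The three forms handled above correspond roughly to the following C conditions (an illustrative sketch, not taken from the pass); each one tests a single bit, either by shifting the value down and masking with 1, by masking with a power-of-two constant, or by masking with a variable 1 << b.

/* Each condition is decomposed into a (name, bit) pair by
   recognize_single_bit_test.  */
int
bit_tests (unsigned state, unsigned control, unsigned op0)
{
  if ((state >> control) & 1)   /* value shifted, masked with 1 */
    return 1;
  if (op0 & (1u << 3))          /* constant power-of-two mask */
    return 2;
  if (op0 & (1u << control))    /* variable single-bit mask */
    return 3;
  return 0;
}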
static bool init_dont_simulate_again (void) { basic_block bb; gimple_stmt_iterator gsi; gimple phi; bool saw_a_complex_op = false; FOR_EACH_BB (bb) { for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { phi = gsi_stmt (gsi); prop_set_simulate_again (phi, is_complex_reg (gimple_phi_result (phi))); } for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt; tree op0, op1; bool sim_again_p; stmt = gsi_stmt (gsi); op0 = op1 = NULL_TREE; /* Most control-altering statements must be initially simulated, else we won't cover the entire cfg. */ sim_again_p = stmt_ends_bb_p (stmt); switch (gimple_code (stmt)) { case GIMPLE_CALL: if (gimple_call_lhs (stmt)) sim_again_p = is_complex_reg (gimple_call_lhs (stmt)); break; case GIMPLE_ASSIGN: sim_again_p = is_complex_reg (gimple_assign_lhs (stmt)); if (gimple_assign_rhs_code (stmt) == REALPART_EXPR || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR) op0 = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0); else op0 = gimple_assign_rhs1 (stmt); if (gimple_num_ops (stmt) > 2) op1 = gimple_assign_rhs2 (stmt); break; case GIMPLE_COND: op0 = gimple_cond_lhs (stmt); op1 = gimple_cond_rhs (stmt); break; default: break; } if (op0 || op1) switch (gimple_expr_code (stmt)) { case EQ_EXPR: case NE_EXPR: case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case RDIV_EXPR: if (TREE_CODE (TREE_TYPE (op0)) == COMPLEX_TYPE || TREE_CODE (TREE_TYPE (op1)) == COMPLEX_TYPE) saw_a_complex_op = true; break; case NEGATE_EXPR: case CONJ_EXPR: if (TREE_CODE (TREE_TYPE (op0)) == COMPLEX_TYPE) saw_a_complex_op = true; break; case REALPART_EXPR: case IMAGPART_EXPR: /* The total store transformation performed during gimplification creates such uninitialized loads and we need to lower the statement to be able to fix things up. */ if (TREE_CODE (op0) == SSA_NAME && ssa_undefined_value_p (op0)) saw_a_complex_op = true; break; default: break; } prop_set_simulate_again (stmt, sim_again_p); } } return saw_a_complex_op; }
static bool check_pow (gimple pow_call) { tree base, expn; enum tree_code bc, ec; if (gimple_call_num_args (pow_call) != 2) return false; base = gimple_call_arg (pow_call, 0); expn = gimple_call_arg (pow_call, 1); if (!check_target_format (expn)) return false; bc = TREE_CODE (base); ec = TREE_CODE (expn); /* Folding candidates are not interesting. Can actually assert that it is already folded. */ if (ec == REAL_CST && bc == REAL_CST) return false; if (bc == REAL_CST) { /* Only handle a fixed range of constant. */ REAL_VALUE_TYPE mv; REAL_VALUE_TYPE bcv = TREE_REAL_CST (base); if (REAL_VALUES_EQUAL (bcv, dconst1)) return false; if (REAL_VALUES_LESS (bcv, dconst1)) return false; real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, 0, 1); if (REAL_VALUES_LESS (mv, bcv)) return false; return true; } else if (bc == SSA_NAME) { tree base_val0, base_var, type; gimple base_def; int bit_sz; /* Only handles cases where base value is converted from integer values. */ base_def = SSA_NAME_DEF_STMT (base); if (gimple_code (base_def) != GIMPLE_ASSIGN) return false; if (gimple_assign_rhs_code (base_def) != FLOAT_EXPR) return false; base_val0 = gimple_assign_rhs1 (base_def); base_var = SSA_NAME_VAR (base_val0); if (!DECL_P (base_var)) return false; type = TREE_TYPE (base_var); if (TREE_CODE (type) != INTEGER_TYPE) return false; bit_sz = TYPE_PRECISION (type); /* If the type of the base is too wide, the resulting shrink wrapping condition will be too conservative. */ if (bit_sz > MAX_BASE_INT_BIT_SIZE) return false; return true; } else return false; }
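Hedged sketch of the two base shapes this check accepts for pow shrink-wrapping: a real-constant base greater than 1 and at most 256, or a base produced by converting a sufficiently narrow integer variable to floating point (its precision must not exceed MAX_BASE_INT_BIT_SIZE). The calls below are illustrative only.

#include <math.h>

double
pow_bases (int i, double d)
{
  double a = pow (2.5, d);         /* constant base in (1, 256]: accepted    */
  double b = pow ((double) i, d);  /* FLOAT_EXPR of an integer variable:
                                      accepted if int's precision is small
                                      enough                                 */
  double c = pow (d, 3.0);         /* variable, non-converted base: rejected */
  return a + b + c;
}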
static void vect_pattern_recog_1 ( gimple (* vect_recog_func) (gimple, tree *, tree *), gimple_stmt_iterator si) { gimple stmt = gsi_stmt (si), pattern_stmt; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); stmt_vec_info pattern_stmt_info; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); tree pattern_vectype; tree type_in, type_out; enum tree_code code; int i; gimple next; pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out); if (!pattern_stmt) return; if (VECTOR_MODE_P (TYPE_MODE (type_in))) { /* No need to check target support (already checked by the pattern recognition function). */ if (type_out) gcc_assert (VECTOR_MODE_P (TYPE_MODE (type_out))); pattern_vectype = type_out ? type_out : type_in; } else { enum machine_mode vec_mode; enum insn_code icode; optab optab; /* Check target support */ type_in = get_vectype_for_scalar_type (type_in); if (!type_in) return; if (type_out) type_out = get_vectype_for_scalar_type (type_out); else type_out = type_in; if (!type_out) return; pattern_vectype = type_out; if (is_gimple_assign (pattern_stmt)) code = gimple_assign_rhs_code (pattern_stmt); else { gcc_assert (is_gimple_call (pattern_stmt)); code = CALL_EXPR; } optab = optab_for_tree_code (code, type_in, optab_default); vec_mode = TYPE_MODE (type_in); if (!optab || (icode = optab_handler (optab, vec_mode)) == CODE_FOR_nothing || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out))) return; } /* Found a vectorizable pattern. */ if (vect_print_dump_info (REPORT_DETAILS)) { fprintf (vect_dump, "pattern recognized: "); print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM); } /* Mark the stmts that are involved in the pattern. */ gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT); set_vinfo_for_stmt (pattern_stmt, new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL)); pattern_stmt_info = vinfo_for_stmt (pattern_stmt); STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt; STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info); STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype; STMT_VINFO_IN_PATTERN_P (stmt_info) = true; STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt; /* Patterns cannot be vectorized using SLP, because they change the order of computation. */ FOR_EACH_VEC_ELT (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i, next) if (next == stmt) VEC_ordered_remove (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i); }
expr_hash_elt::expr_hash_elt (gimple *stmt, tree orig_lhs) { enum gimple_code code = gimple_code (stmt); struct hashable_expr *expr = this->expr (); if (code == GIMPLE_ASSIGN) { enum tree_code subcode = gimple_assign_rhs_code (stmt); switch (get_gimple_rhs_class (subcode)) { case GIMPLE_SINGLE_RHS: expr->kind = EXPR_SINGLE; expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt)); expr->ops.single.rhs = gimple_assign_rhs1 (stmt); break; case GIMPLE_UNARY_RHS: expr->kind = EXPR_UNARY; expr->type = TREE_TYPE (gimple_assign_lhs (stmt)); if (CONVERT_EXPR_CODE_P (subcode)) subcode = NOP_EXPR; expr->ops.unary.op = subcode; expr->ops.unary.opnd = gimple_assign_rhs1 (stmt); break; case GIMPLE_BINARY_RHS: expr->kind = EXPR_BINARY; expr->type = TREE_TYPE (gimple_assign_lhs (stmt)); expr->ops.binary.op = subcode; expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt); expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt); break; case GIMPLE_TERNARY_RHS: expr->kind = EXPR_TERNARY; expr->type = TREE_TYPE (gimple_assign_lhs (stmt)); expr->ops.ternary.op = subcode; expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt); expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt); expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt); break; default: gcc_unreachable (); } } else if (code == GIMPLE_COND) { expr->type = boolean_type_node; expr->kind = EXPR_BINARY; expr->ops.binary.op = gimple_cond_code (stmt); expr->ops.binary.opnd0 = gimple_cond_lhs (stmt); expr->ops.binary.opnd1 = gimple_cond_rhs (stmt); } else if (gcall *call_stmt = dyn_cast <gcall *> (stmt)) { size_t nargs = gimple_call_num_args (call_stmt); size_t i; gcc_assert (gimple_call_lhs (call_stmt)); expr->type = TREE_TYPE (gimple_call_lhs (call_stmt)); expr->kind = EXPR_CALL; expr->ops.call.fn_from = call_stmt; if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE)) expr->ops.call.pure = true; else expr->ops.call.pure = false; expr->ops.call.nargs = nargs; expr->ops.call.args = XCNEWVEC (tree, nargs); for (i = 0; i < nargs; i++) expr->ops.call.args[i] = gimple_call_arg (call_stmt, i); } else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt)) { expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt)); expr->kind = EXPR_SINGLE; expr->ops.single.rhs = gimple_switch_index (swtch_stmt); } else if (code == GIMPLE_GOTO) { expr->type = TREE_TYPE (gimple_goto_dest (stmt)); expr->kind = EXPR_SINGLE; expr->ops.single.rhs = gimple_goto_dest (stmt); } else if (code == GIMPLE_PHI) { size_t nargs = gimple_phi_num_args (stmt); size_t i; expr->type = TREE_TYPE (gimple_phi_result (stmt)); expr->kind = EXPR_PHI; expr->ops.phi.nargs = nargs; expr->ops.phi.args = XCNEWVEC (tree, nargs); for (i = 0; i < nargs; i++) expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i); } else gcc_unreachable (); m_lhs = orig_lhs; m_vop = gimple_vuse (stmt); m_hash = avail_expr_hash (this); m_stamp = this; }
/* Expand conditional compare gimple G. A typical CCMP sequence is like: CC0 = CMP (a, b); CC1 = CCMP (NE (CC0, 0), CMP (e, f)); ... CCn = CCMP (NE (CCn-1, 0), CMP (...)); Hook gen_ccmp_first is used to expand the first compare. Hook gen_ccmp_next is used to expand the following CCMP. PREP_SEQ returns all insns to prepare operands. GEN_SEQ returns all compare insns. */ static rtx expand_ccmp_expr_1 (gimple *g, rtx *prep_seq, rtx *gen_seq) { tree exp = gimple_assign_rhs_to_tree (g); enum tree_code code = TREE_CODE (exp); gimple *gs0 = get_gimple_for_ssa_name (TREE_OPERAND (exp, 0)); gimple *gs1 = get_gimple_for_ssa_name (TREE_OPERAND (exp, 1)); rtx tmp; enum tree_code code0 = gimple_assign_rhs_code (gs0); enum tree_code code1 = gimple_assign_rhs_code (gs1); gcc_assert (code == BIT_AND_EXPR || code == BIT_IOR_EXPR); gcc_assert (gs0 && gs1 && is_gimple_assign (gs0) && is_gimple_assign (gs1)); if (TREE_CODE_CLASS (code0) == tcc_comparison) { if (TREE_CODE_CLASS (code1) == tcc_comparison) { int unsignedp0; enum rtx_code rcode0; unsignedp0 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (gs0))); rcode0 = get_rtx_code (code0, unsignedp0); tmp = targetm.gen_ccmp_first (prep_seq, gen_seq, rcode0, gimple_assign_rhs1 (gs0), gimple_assign_rhs2 (gs0)); if (!tmp) return NULL_RTX; return expand_ccmp_next (gs1, code, tmp, prep_seq, gen_seq); } else { tmp = expand_ccmp_expr_1 (gs1, prep_seq, gen_seq); if (!tmp) return NULL_RTX; return expand_ccmp_next (gs0, code, tmp, prep_seq, gen_seq); } } else { gcc_assert (gimple_assign_rhs_code (gs0) == BIT_AND_EXPR || gimple_assign_rhs_code (gs0) == BIT_IOR_EXPR); if (TREE_CODE_CLASS (gimple_assign_rhs_code (gs1)) == tcc_comparison) { tmp = expand_ccmp_expr_1 (gs0, prep_seq, gen_seq); if (!tmp) return NULL_RTX; return expand_ccmp_next (gs1, code, tmp, prep_seq, gen_seq); } else { gcc_assert (gimple_assign_rhs_code (gs1) == BIT_AND_EXPR || gimple_assign_rhs_code (gs1) == BIT_IOR_EXPR); } } return NULL_RTX; }
static bool forward_propagate_addr_into_variable_array_index (tree offset, tree def_rhs, gimple_stmt_iterator *use_stmt_gsi) { tree index, tunit; gimple offset_def, use_stmt = gsi_stmt (*use_stmt_gsi); tree tmp; tunit = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (def_rhs))); if (!host_integerp (tunit, 1)) return false; /* Get the offset's defining statement. */ offset_def = SSA_NAME_DEF_STMT (offset); /* Try to find an expression for a proper index. This is either a multiplication expression by the element size or just the ssa name we came along in case the element size is one. In that case, however, we do not allow multiplications because they can be computing index to a higher level dimension (PR 37861). */ if (integer_onep (tunit)) { if (is_gimple_assign (offset_def) && gimple_assign_rhs_code (offset_def) == MULT_EXPR) return false; index = offset; } else { /* The statement which defines OFFSET before type conversion must be a simple GIMPLE_ASSIGN. */ if (!is_gimple_assign (offset_def)) return false; /* The RHS of the statement which defines OFFSET must be a multiplication of an object by the size of the array elements. This implicitly verifies that the size of the array elements is constant. */ if (gimple_assign_rhs_code (offset_def) == MULT_EXPR && TREE_CODE (gimple_assign_rhs2 (offset_def)) == INTEGER_CST && tree_int_cst_equal (gimple_assign_rhs2 (offset_def), tunit)) { /* The first operand to the MULT_EXPR is the desired index. */ index = gimple_assign_rhs1 (offset_def); } /* If we have idx * tunit + CST * tunit re-associate that. */ else if ((gimple_assign_rhs_code (offset_def) == PLUS_EXPR || gimple_assign_rhs_code (offset_def) == MINUS_EXPR) && TREE_CODE (gimple_assign_rhs1 (offset_def)) == SSA_NAME && TREE_CODE (gimple_assign_rhs2 (offset_def)) == INTEGER_CST && (tmp = div_if_zero_remainder (EXACT_DIV_EXPR, gimple_assign_rhs2 (offset_def), tunit)) != NULL_TREE) { gimple offset_def2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (offset_def)); if (is_gimple_assign (offset_def2) && gimple_assign_rhs_code (offset_def2) == MULT_EXPR && TREE_CODE (gimple_assign_rhs2 (offset_def2)) == INTEGER_CST && tree_int_cst_equal (gimple_assign_rhs2 (offset_def2), tunit)) { index = fold_build2 (gimple_assign_rhs_code (offset_def), TREE_TYPE (offset), gimple_assign_rhs1 (offset_def2), tmp); } else return false; } else return false; } /* Replace the pointer addition with array indexing. */ index = force_gimple_operand_gsi (use_stmt_gsi, index, true, NULL_TREE, true, GSI_SAME_STMT); gimple_assign_set_rhs_from_tree (use_stmt_gsi, unshare_expr (def_rhs)); use_stmt = gsi_stmt (*use_stmt_gsi); TREE_OPERAND (TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0), 1) = index; /* That should have created gimple, so there is no need to record information to undo the propagation. */ fold_stmt_inplace (use_stmt); tidy_after_forward_propagate_addr (use_stmt); return true; }
static bool generate_builtin (struct loop *loop, bitmap partition, bool copy_p) { bool res = false; unsigned i, x = 0; basic_block *bbs; gimple write = NULL; tree op0, op1; gimple_stmt_iterator bsi; tree nb_iter = number_of_exit_cond_executions (loop); if (!nb_iter || nb_iter == chrec_dont_know) return false; bbs = get_loop_body_in_dom_order (loop); for (i = 0; i < loop->num_nodes; i++) { basic_block bb = bbs[i]; for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi)) x++; for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi)) { gimple stmt = gsi_stmt (bsi); if (bitmap_bit_p (partition, x++) && is_gimple_assign (stmt) && !is_gimple_reg (gimple_assign_lhs (stmt))) { /* Don't generate the builtins when there are more than one memory write. */ if (write != NULL) goto end; write = stmt; } } } if (!write) goto end; op0 = gimple_assign_lhs (write); op1 = gimple_assign_rhs1 (write); if (!(TREE_CODE (op0) == ARRAY_REF || TREE_CODE (op0) == INDIRECT_REF)) goto end; /* The new statements will be placed before LOOP. */ bsi = gsi_last_bb (loop_preheader_edge (loop)->src); if (gimple_assign_rhs_code (write) == INTEGER_CST && (integer_zerop (op1) || real_zerop (op1))) res = generate_memset_zero (write, op0, nb_iter, bsi); /* If this is the last partition for which we generate code, we have to destroy the loop. */ if (res && !copy_p) { unsigned nbbs = loop->num_nodes; basic_block src = loop_preheader_edge (loop)->src; basic_block dest = single_exit (loop)->dest; prop_phis (dest); make_edge (src, dest, EDGE_FALLTHRU); cancel_loop_tree (loop); for (i = 0; i < nbbs; i++) delete_basic_block (bbs[i]); set_immediate_dominator (CDI_DOMINATORS, dest, recompute_dominator (CDI_DOMINATORS, dest)); } end: free (bbs); return res; }
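For orientation, this is the loop-distribution path that replaces a partition containing a single zero-store with a memset; a loop of the following shape (illustrative, names invented) is the intended input, provided its iteration count is computable.

/* With loop distribution enabled (-ftree-loop-distribution, or
   -ftree-loop-distribute-patterns on newer GCC), the zero-store
   partition of this loop can be turned into memset (buf, 0, n).  */
static char buf[256];

void
clear_prefix (int n)
{
  for (int i = 0; i < n; i++)
    buf[i] = 0;
}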
static bool forward_propagate_addr_expr_1 (tree name, tree def_rhs, gimple_stmt_iterator *use_stmt_gsi, bool single_use_p) { tree lhs, rhs, rhs2, array_ref; tree *rhsp, *lhsp; gimple use_stmt = gsi_stmt (*use_stmt_gsi); enum tree_code rhs_code; gcc_assert (TREE_CODE (def_rhs) == ADDR_EXPR); lhs = gimple_assign_lhs (use_stmt); rhs_code = gimple_assign_rhs_code (use_stmt); rhs = gimple_assign_rhs1 (use_stmt); /* Trivial cases. The use statement could be a trivial copy or a useless conversion. Recurse to the uses of the lhs as copyprop does not copy through different variant pointers and FRE does not catch all useless conversions. Treat the case of a single-use name and a conversion to def_rhs type separate, though. */ if (TREE_CODE (lhs) == SSA_NAME && ((rhs_code == SSA_NAME && rhs == name) || CONVERT_EXPR_CODE_P (rhs_code))) { /* Only recurse if we don't deal with a single use or we cannot do the propagation to the current statement. In particular we can end up with a conversion needed for a non-invariant address which we cannot do in a single statement. */ if (!single_use_p || (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs)) && !is_gimple_min_invariant (def_rhs))) return forward_propagate_addr_expr (lhs, def_rhs); gimple_assign_set_rhs1 (use_stmt, unshare_expr (def_rhs)); if (useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs))) gimple_assign_set_rhs_code (use_stmt, TREE_CODE (def_rhs)); else gimple_assign_set_rhs_code (use_stmt, NOP_EXPR); return true; } /* Now strip away any outer COMPONENT_REF/ARRAY_REF nodes from the LHS. ADDR_EXPR will not appear on the LHS. */ lhsp = gimple_assign_lhs_ptr (use_stmt); while (handled_component_p (*lhsp)) lhsp = &TREE_OPERAND (*lhsp, 0); lhs = *lhsp; /* Now see if the LHS node is an INDIRECT_REF using NAME. If so, propagate the ADDR_EXPR into the use of NAME and fold the result. */ if (TREE_CODE (lhs) == INDIRECT_REF && TREE_OPERAND (lhs, 0) == name && may_propagate_address_into_dereference (def_rhs, lhs) && (lhsp != gimple_assign_lhs_ptr (use_stmt) || useless_type_conversion_p (TREE_TYPE (TREE_OPERAND (def_rhs, 0)), TREE_TYPE (rhs)))) { *lhsp = unshare_expr (TREE_OPERAND (def_rhs, 0)); fold_stmt_inplace (use_stmt); tidy_after_forward_propagate_addr (use_stmt); /* Continue propagating into the RHS if this was not the only use. */ if (single_use_p) return true; } /* Strip away any outer COMPONENT_REF, ARRAY_REF or ADDR_EXPR nodes from the RHS. */ rhsp = gimple_assign_rhs1_ptr (use_stmt); while (handled_component_p (*rhsp) || TREE_CODE (*rhsp) == ADDR_EXPR) rhsp = &TREE_OPERAND (*rhsp, 0); rhs = *rhsp; /* Now see if the RHS node is an INDIRECT_REF using NAME. If so, propagate the ADDR_EXPR into the use of NAME and fold the result. */ if (TREE_CODE (rhs) == INDIRECT_REF && TREE_OPERAND (rhs, 0) == name && may_propagate_address_into_dereference (def_rhs, rhs)) { *rhsp = unshare_expr (TREE_OPERAND (def_rhs, 0)); fold_stmt_inplace (use_stmt); tidy_after_forward_propagate_addr (use_stmt); return true; } /* Now see if the RHS node is an INDIRECT_REF using NAME. If so, propagate the ADDR_EXPR into the use of NAME and try to create a VCE and fold the result. */ if (TREE_CODE (rhs) == INDIRECT_REF && TREE_OPERAND (rhs, 0) == name && TYPE_SIZE (TREE_TYPE (rhs)) && TYPE_SIZE (TREE_TYPE (TREE_OPERAND (def_rhs, 0))) /* Function decls should not be used for VCE either as it could be a function descriptor that we want and not the actual function code. 
*/ && TREE_CODE (TREE_OPERAND (def_rhs, 0)) != FUNCTION_DECL /* We should not convert volatile loads to non volatile loads. */ && !TYPE_VOLATILE (TREE_TYPE (rhs)) && !TYPE_VOLATILE (TREE_TYPE (TREE_OPERAND (def_rhs, 0))) && operand_equal_p (TYPE_SIZE (TREE_TYPE (rhs)), TYPE_SIZE (TREE_TYPE (TREE_OPERAND (def_rhs, 0))), 0)) { tree def_rhs_base, new_rhs = unshare_expr (TREE_OPERAND (def_rhs, 0)); new_rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (rhs), new_rhs); if (TREE_CODE (new_rhs) != VIEW_CONVERT_EXPR) { /* If we have folded the VIEW_CONVERT_EXPR then the result is only valid if we can replace the whole rhs of the use statement. */ if (rhs != gimple_assign_rhs1 (use_stmt)) return false; new_rhs = force_gimple_operand_gsi (use_stmt_gsi, new_rhs, true, NULL, true, GSI_NEW_STMT); gimple_assign_set_rhs1 (use_stmt, new_rhs); tidy_after_forward_propagate_addr (use_stmt); return true; } /* If the defining rhs comes from an indirect reference, then do not convert into a VIEW_CONVERT_EXPR. */ def_rhs_base = TREE_OPERAND (def_rhs, 0); while (handled_component_p (def_rhs_base)) def_rhs_base = TREE_OPERAND (def_rhs_base, 0); if (!INDIRECT_REF_P (def_rhs_base)) { /* We may have arbitrary VIEW_CONVERT_EXPRs in a nested component reference. Place it there and fold the thing. */ *rhsp = new_rhs; fold_stmt_inplace (use_stmt); tidy_after_forward_propagate_addr (use_stmt); return true; } } /* If the use of the ADDR_EXPR is not a POINTER_PLUS_EXPR, there is nothing to do. */ if (gimple_assign_rhs_code (use_stmt) != POINTER_PLUS_EXPR || gimple_assign_rhs1 (use_stmt) != name) return false; /* The remaining cases are all for turning pointer arithmetic into array indexing. They only apply when we have the address of element zero in an array. If that is not the case then there is nothing to do. */ array_ref = TREE_OPERAND (def_rhs, 0); if (TREE_CODE (array_ref) != ARRAY_REF || TREE_CODE (TREE_TYPE (TREE_OPERAND (array_ref, 0))) != ARRAY_TYPE || !integer_zerop (TREE_OPERAND (array_ref, 1))) return false; rhs2 = gimple_assign_rhs2 (use_stmt); /* Try to optimize &x[0] p+ C where C is a multiple of the size of the elements in X into &x[C/element size]. */ if (TREE_CODE (rhs2) == INTEGER_CST) { tree new_rhs = maybe_fold_stmt_addition (gimple_expr_type (use_stmt), array_ref, rhs2); if (new_rhs) { gimple_assign_set_rhs_from_tree (use_stmt_gsi, new_rhs); use_stmt = gsi_stmt (*use_stmt_gsi); update_stmt (use_stmt); tidy_after_forward_propagate_addr (use_stmt); return true; } } /* Try to optimize &x[0] p+ OFFSET where OFFSET is defined by converting a multiplication of an index by the size of the array elements, then the result is converted into the proper type for the arithmetic. */ if (TREE_CODE (rhs2) == SSA_NAME /* Avoid problems with IVopts creating PLUS_EXPRs with a different type than their operands. */ && useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs))) return forward_propagate_addr_into_variable_array_index (rhs2, def_rhs, use_stmt_gsi); return false; }
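A rough source-level picture of the pointer arithmetic that the tail of this function turns back into array indexing (the array and names are invented): with def_rhs = &x[0], a constant offset that is a multiple of the element size becomes a direct element reference, and a variable offset of the form index * element-size is handed to forward_propagate_addr_into_variable_array_index.

/* Assuming 4-byte int: in GIMPLE the offsets are byte counts, so
     q = p p+ 8        becomes  &x[2]
     q = p p+ (i * 4)  becomes  &x[i]
   once p is known to be &x[0].  */
int x[16];

int
read_elements (long i)
{
  int *p = &x[0];
  return *(p + 2) + *(p + i);
}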
static gimple vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out) { gimple stmt; tree oprnd0, oprnd1; stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt); tree type, half_type; gimple pattern_stmt; loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo); struct loop *loop = LOOP_VINFO_LOOP (loop_info); tree var; if (!is_gimple_assign (last_stmt)) return NULL; type = gimple_expr_type (last_stmt); /* Look for the following pattern DX = (TYPE) X; sum_1 = DX + sum_0; In which DX is at least double the size of X, and sum_1 has been recognized as a reduction variable. */ /* Starting from LAST_STMT, follow the defs of its uses in search of the above pattern. */ if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR) return NULL; if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def) return NULL; oprnd0 = gimple_assign_rhs1 (last_stmt); oprnd1 = gimple_assign_rhs2 (last_stmt); if (!types_compatible_p (TREE_TYPE (oprnd0), type) || !types_compatible_p (TREE_TYPE (oprnd1), type)) return NULL; /* So far so good. Since last_stmt was detected as a (summation) reduction, we know that oprnd1 is the reduction variable (defined by a loop-header phi), and oprnd0 is an ssa-name defined by a stmt in the loop body. Left to check that oprnd0 is defined by a cast from type 'type' to type 'TYPE'. */ if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt)) return NULL; oprnd0 = gimple_assign_rhs1 (stmt); *type_in = half_type; *type_out = type; /* Pattern detected. Create a stmt to be used to replace the pattern: */ var = vect_recog_temp_ssa_var (type, NULL); pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var, oprnd0, oprnd1); SSA_NAME_DEF_STMT (var) = pattern_stmt; if (vect_print_dump_info (REPORT_DETAILS)) { fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: "); print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM); } /* We don't allow changing the order of the computation in the inner-loop when doing outer-loop vectorization. */ gcc_assert (!nested_in_vect_loop_p (loop, last_stmt)); return pattern_stmt; }
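The DX = (TYPE) X; sum_1 = DX + sum_0 shape in the comment corresponds to a reduction loop like the one below (illustrative); when the target provides a widening-sum operation, the conversion-plus-add pair is replaced by a single WIDEN_SUM_EXPR.

/* sum is at least twice as wide as the loaded elements, so each
   iteration is DX = (int) a[i]; sum = DX + sum;  */
int
widened_sum (short *a, int n)
{
  int sum = 0;
  for (int i = 0; i < n; i++)
    sum += a[i];
  return sum;
}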
static bool process_assignment (gimple stmt, gimple_stmt_iterator call, tree *m, tree *a, tree *ass_var) { tree op0, op1 = NULL_TREE, non_ass_var = NULL_TREE; tree dest = gimple_assign_lhs (stmt); enum tree_code code = gimple_assign_rhs_code (stmt); enum gimple_rhs_class rhs_class = get_gimple_rhs_class (code); tree src_var = gimple_assign_rhs1 (stmt); /* See if this is a simple copy operation of an SSA name to the function result. In that case we may have a simple tail call. Ignore type conversions that can never produce extra code between the function call and the function return. */ if ((rhs_class == GIMPLE_SINGLE_RHS || gimple_assign_cast_p (stmt)) && (TREE_CODE (src_var) == SSA_NAME)) { /* Reject a tailcall if the type conversion might need additional code. */ if (gimple_assign_cast_p (stmt)) { if (TYPE_MODE (TREE_TYPE (dest)) != TYPE_MODE (TREE_TYPE (src_var))) return false; /* Even if the type modes are the same, if the precision of the type is smaller than mode's precision, reduce_to_bit_field_precision would generate additional code. */ if (INTEGRAL_TYPE_P (TREE_TYPE (dest)) && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (dest))) > TYPE_PRECISION (TREE_TYPE (dest)))) return false; } if (src_var != *ass_var) return false; *ass_var = dest; return true; } switch (rhs_class) { case GIMPLE_BINARY_RHS: op1 = gimple_assign_rhs2 (stmt); /* Fall through. */ case GIMPLE_UNARY_RHS: op0 = gimple_assign_rhs1 (stmt); break; default: return false; } /* Accumulator optimizations will reverse the order of operations. We can only do that for floating-point types if we're assuming that addition and multiplication are associative. */ if (!flag_associative_math) if (FLOAT_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl)))) return false; if (rhs_class == GIMPLE_UNARY_RHS) ; else if (op0 == *ass_var && (non_ass_var = independent_of_stmt_p (op1, stmt, call))) ; else if (op1 == *ass_var && (non_ass_var = independent_of_stmt_p (op0, stmt, call))) ; else return false; switch (code) { case PLUS_EXPR: *a = non_ass_var; *ass_var = dest; return true; case POINTER_PLUS_EXPR: if (op0 != *ass_var) return false; *a = non_ass_var; *ass_var = dest; return true; case MULT_EXPR: *m = non_ass_var; *ass_var = dest; return true; case NEGATE_EXPR: *m = build_minus_one_cst (TREE_TYPE (op0)); *ass_var = dest; return true; case MINUS_EXPR: if (*ass_var == op0) *a = fold_build1 (NEGATE_EXPR, TREE_TYPE (non_ass_var), non_ass_var); else { *m = build_minus_one_cst (TREE_TYPE (non_ass_var)); *a = fold_build1 (NEGATE_EXPR, TREE_TYPE (non_ass_var), non_ass_var); } *ass_var = dest; return true; /* TODO -- Handle POINTER_PLUS_EXPR. */ default: return false; } }
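process_assignment is examining the statements between a recursive call and the return, trying to express the returned value as m * <call result> + a so the recursion can still be turned into a loop with accumulators. Two classic, illustrative inputs:

/* The additions/multiplications after the recursive calls are what
   process_assignment decomposes: a = n for sum_to, m = n for factorial.
   Tail-recursion elimination then introduces add/mul accumulators.  */
int
sum_to (int n)
{
  if (n == 0)
    return 0;
  return n + sum_to (n - 1);
}

int
factorial (int n)
{
  if (n <= 1)
    return 1;
  return n * factorial (n - 1);
}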
bool gimple_simplify (gimple *stmt, code_helper *rcode, tree *ops, gimple_seq *seq, tree (*valueize)(tree), tree (*top_valueize)(tree)) { switch (gimple_code (stmt)) { case GIMPLE_ASSIGN: { enum tree_code code = gimple_assign_rhs_code (stmt); tree type = TREE_TYPE (gimple_assign_lhs (stmt)); switch (gimple_assign_rhs_class (stmt)) { case GIMPLE_SINGLE_RHS: if (code == REALPART_EXPR || code == IMAGPART_EXPR || code == VIEW_CONVERT_EXPR) { tree op0 = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0); bool valueized = false; op0 = do_valueize (op0, top_valueize, valueized); *rcode = code; ops[0] = op0; return (gimple_resimplify1 (seq, rcode, type, ops, valueize) || valueized); } else if (code == BIT_FIELD_REF) { tree rhs1 = gimple_assign_rhs1 (stmt); tree op0 = TREE_OPERAND (rhs1, 0); bool valueized = false; op0 = do_valueize (op0, top_valueize, valueized); *rcode = code; ops[0] = op0; ops[1] = TREE_OPERAND (rhs1, 1); ops[2] = TREE_OPERAND (rhs1, 2); return (gimple_resimplify3 (seq, rcode, type, ops, valueize) || valueized); } else if (code == SSA_NAME && top_valueize) { tree op0 = gimple_assign_rhs1 (stmt); tree valueized = top_valueize (op0); if (!valueized || op0 == valueized) return false; ops[0] = valueized; *rcode = TREE_CODE (op0); return true; } break; case GIMPLE_UNARY_RHS: { tree rhs1 = gimple_assign_rhs1 (stmt); bool valueized = false; rhs1 = do_valueize (rhs1, top_valueize, valueized); *rcode = code; ops[0] = rhs1; return (gimple_resimplify1 (seq, rcode, type, ops, valueize) || valueized); } case GIMPLE_BINARY_RHS: { tree rhs1 = gimple_assign_rhs1 (stmt); tree rhs2 = gimple_assign_rhs2 (stmt); bool valueized = false; rhs1 = do_valueize (rhs1, top_valueize, valueized); rhs2 = do_valueize (rhs2, top_valueize, valueized); *rcode = code; ops[0] = rhs1; ops[1] = rhs2; return (gimple_resimplify2 (seq, rcode, type, ops, valueize) || valueized); } case GIMPLE_TERNARY_RHS: { bool valueized = false; tree rhs1 = gimple_assign_rhs1 (stmt); /* If this is a [VEC_]COND_EXPR first try to simplify an embedded GENERIC condition. */ if (code == COND_EXPR || code == VEC_COND_EXPR) { if (COMPARISON_CLASS_P (rhs1)) { tree lhs = TREE_OPERAND (rhs1, 0); tree rhs = TREE_OPERAND (rhs1, 1); lhs = do_valueize (lhs, top_valueize, valueized); rhs = do_valueize (rhs, top_valueize, valueized); code_helper rcode2 = TREE_CODE (rhs1); tree ops2[3] = {}; ops2[0] = lhs; ops2[1] = rhs; if ((gimple_resimplify2 (seq, &rcode2, TREE_TYPE (rhs1), ops2, valueize) || valueized) && rcode2.is_tree_code ()) { valueized = true; if (TREE_CODE_CLASS ((enum tree_code)rcode2) == tcc_comparison) rhs1 = build2 (rcode2, TREE_TYPE (rhs1), ops2[0], ops2[1]); else if (rcode2 == SSA_NAME || rcode2 == INTEGER_CST) rhs1 = ops2[0]; else valueized = false; } } } tree rhs2 = gimple_assign_rhs2 (stmt); tree rhs3 = gimple_assign_rhs3 (stmt); rhs1 = do_valueize (rhs1, top_valueize, valueized); rhs2 = do_valueize (rhs2, top_valueize, valueized); rhs3 = do_valueize (rhs3, top_valueize, valueized); *rcode = code; ops[0] = rhs1; ops[1] = rhs2; ops[2] = rhs3; return (gimple_resimplify3 (seq, rcode, type, ops, valueize) || valueized); } default: gcc_unreachable (); } break; } case GIMPLE_CALL: /* ??? This way we can't simplify calls with side-effects. */ if (gimple_call_lhs (stmt) != NULL_TREE && gimple_call_num_args (stmt) >= 1 && gimple_call_num_args (stmt) <= 3) { tree fn = gimple_call_fn (stmt); /* ??? Internal function support missing. 
*/ if (!fn) return false; bool valueized = false; fn = do_valueize (fn, top_valueize, valueized); if (TREE_CODE (fn) != ADDR_EXPR || TREE_CODE (TREE_OPERAND (fn, 0)) != FUNCTION_DECL) return false; tree decl = TREE_OPERAND (fn, 0); if (DECL_BUILT_IN_CLASS (decl) != BUILT_IN_NORMAL || !builtin_decl_implicit (DECL_FUNCTION_CODE (decl)) || !gimple_builtin_call_types_compatible_p (stmt, decl)) return false; tree type = TREE_TYPE (gimple_call_lhs (stmt)); *rcode = DECL_FUNCTION_CODE (decl); for (unsigned i = 0; i < gimple_call_num_args (stmt); ++i) { tree arg = gimple_call_arg (stmt, i); ops[i] = do_valueize (arg, top_valueize, valueized); } switch (gimple_call_num_args (stmt)) { case 1: return (gimple_resimplify1 (seq, rcode, type, ops, valueize) || valueized); case 2: return (gimple_resimplify2 (seq, rcode, type, ops, valueize) || valueized); case 3: return (gimple_resimplify3 (seq, rcode, type, ops, valueize) || valueized); default: gcc_unreachable (); } } break; case GIMPLE_COND: { tree lhs = gimple_cond_lhs (stmt); tree rhs = gimple_cond_rhs (stmt); bool valueized = false; lhs = do_valueize (lhs, top_valueize, valueized); rhs = do_valueize (rhs, top_valueize, valueized); *rcode = gimple_cond_code (stmt); ops[0] = lhs; ops[1] = rhs; return (gimple_resimplify2 (seq, rcode, boolean_type_node, ops, valueize) || valueized); } default: break; } return false; }
/* Compute value of PTR and put it into address RES. */ static void chkp_collect_value (tree ptr, address_t &res) { gimple *def_stmt; enum gimple_code code; enum tree_code rhs_code; address_t addr; tree rhs1; if (TREE_CODE (ptr) == INTEGER_CST) { chkp_add_addr_item (res, ptr, NULL); return; } else if (TREE_CODE (ptr) == ADDR_EXPR) { chkp_collect_addr_value (ptr, res); return; } else if (TREE_CODE (ptr) != SSA_NAME) { chkp_add_addr_item (res, integer_one_node, ptr); return; } /* Now we handle the case when polynomial is computed for SSA NAME. */ def_stmt = SSA_NAME_DEF_STMT (ptr); code = gimple_code (def_stmt); /* Currently we do not walk through statements other than assignment. */ if (code != GIMPLE_ASSIGN) { chkp_add_addr_item (res, integer_one_node, ptr); return; } rhs_code = gimple_assign_rhs_code (def_stmt); rhs1 = gimple_assign_rhs1 (def_stmt); switch (rhs_code) { case SSA_NAME: case INTEGER_CST: case ADDR_EXPR: chkp_collect_value (rhs1, res); break; case PLUS_EXPR: case POINTER_PLUS_EXPR: chkp_collect_value (rhs1, res); addr.pol.create (0); chkp_collect_value (gimple_assign_rhs2 (def_stmt), addr); chkp_add_addr_addr (res, addr); addr.pol.release (); break; case MINUS_EXPR: chkp_collect_value (rhs1, res); addr.pol.create (0); chkp_collect_value (gimple_assign_rhs2 (def_stmt), addr); chkp_sub_addr_addr (res, addr); addr.pol.release (); break; case MULT_EXPR: if (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (gimple_assign_rhs2 (def_stmt)) == INTEGER_CST) { chkp_collect_value (rhs1, res); chkp_mult_addr (res, gimple_assign_rhs2 (def_stmt)); } else if (TREE_CODE (gimple_assign_rhs2 (def_stmt)) == SSA_NAME && TREE_CODE (rhs1) == INTEGER_CST) { chkp_collect_value (gimple_assign_rhs2 (def_stmt), res); chkp_mult_addr (res, rhs1); } else chkp_add_addr_item (res, integer_one_node, ptr); break; default: chkp_add_addr_item (res, integer_one_node, ptr); break; } }
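In rough terms, this collector expresses an address as a linear polynomial over SSA names: constants, sums, differences and multiplications by a constant are decomposed recursively, and anything it cannot look through is kept as an opaque 1 * <name> term. A hypothetical example of the decomposition:

/* For the returned pointer below, with 16-byte elements, the collected
   polynomial is conceptually 1 * p + 16 * i + 16: items (1, p) and
   (16, i) plus the constant 16.  A product of two SSA names would
   instead stay as a single (1, tmp) item, since neither factor is
   constant.  */
struct elt { char bytes[16]; };

struct elt *
element_after (struct elt *p, long i)
{
  return p + i + 1;
}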