tree
gfc_truthvalue_conversion (tree expr)
{
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case BOOLEAN_TYPE:
      if (TREE_TYPE (expr) == boolean_type_node)
        return expr;
      else if (COMPARISON_CLASS_P (expr))
        {
          TREE_TYPE (expr) = boolean_type_node;
          return expr;
        }
      else if (TREE_CODE (expr) == NOP_EXPR)
        return fold_build1 (NOP_EXPR, boolean_type_node,
                            TREE_OPERAND (expr, 0));
      else
        return fold_build1 (NOP_EXPR, boolean_type_node, expr);

    case INTEGER_TYPE:
      if (TREE_CODE (expr) == INTEGER_CST)
        return integer_zerop (expr) ? boolean_false_node : boolean_true_node;
      else
        return fold_build2 (NE_EXPR, boolean_type_node, expr,
                            build_int_cst (TREE_TYPE (expr), 0));

    default:
      internal_error ("Unexpected type in truthvalue_conversion");
    }
}
static tree
add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
                 aff_tree *comb)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  scale = double_int_ext_for_comb (scale, comb);
  elt = fold_convert (type1, elt);

  if (scale.is_one ())
    {
      if (!expr)
        return fold_convert (type, elt);

      if (POINTER_TYPE_P (type))
        return fold_build_pointer_plus (expr, elt);
      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale.is_minus_one ())
    {
      if (!expr)
        return fold_convert (type, fold_build1 (NEGATE_EXPR, type1, elt));

      if (POINTER_TYPE_P (type))
        {
          elt = fold_build1 (NEGATE_EXPR, type1, elt);
          return fold_build_pointer_plus (expr, elt);
        }
      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_convert (type,
                         fold_build2 (MULT_EXPR, type1, elt,
                                      double_int_to_tree (type1, scale)));

  if (scale.is_negative ())
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
                     double_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (type))
    {
      if (code == MINUS_EXPR)
        elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build_pointer_plus (expr, elt);
    }
  return fold_build2 (code, type, expr, elt);
}
/* We are assuming that given a SIMPLE val, the result will be a
   SIMPLE rhs.  If this is not the case, we will abort with an
   internal error.  */

tree
convert (tree type, tree expr)
{
  tree e = expr;
  enum tree_code code = TREE_CODE (type);

  if (type == TREE_TYPE (expr)
      || TREE_CODE (expr) == ERROR_MARK
      || code == ERROR_MARK
      || TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
    return expr;

  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr)))
    return fold_build1 (NOP_EXPR, type, expr);
  if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
    return error_mark_node;
  if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE)
    {
      error ("void value not ignored as it ought to be");
      return error_mark_node;
    }
  if (code == VOID_TYPE)
    return fold_build1 (CONVERT_EXPR, type, e);
#if 0
  /* This is incorrect.  A truncation can't be stripped this way.
     Extensions will be stripped by the use of get_unwidened.  */
  if (TREE_CODE (expr) == NOP_EXPR)
    return convert (type, TREE_OPERAND (expr, 0));
#endif
  if (code == INTEGER_TYPE || code == ENUMERAL_TYPE)
    return fold (convert_to_integer (type, e));
  if (code == BOOLEAN_TYPE)
    {
      e = gfc_truthvalue_conversion (e);

      /* If we have a NOP_EXPR, we must fold it here to avoid
         infinite recursion between fold () and convert ().  */
      if (TREE_CODE (e) == NOP_EXPR)
        return fold_build1 (NOP_EXPR, type, TREE_OPERAND (e, 0));
      else
        return fold_build1 (NOP_EXPR, type, e);
    }
  if (code == POINTER_TYPE || code == REFERENCE_TYPE)
    return fold (convert_to_pointer (type, e));
  if (code == REAL_TYPE)
    return fold (convert_to_real (type, e));
  if (code == COMPLEX_TYPE)
    return fold (convert_to_complex (type, e));
  if (code == VECTOR_TYPE)
    return fold (convert_to_vector (type, e));

  error ("conversion to non-scalar type requested");
  return error_mark_node;
}
tree
ubsan_encode_value (tree t, bool in_expand_p)
{
  tree type = TREE_TYPE (t);
  const unsigned int bitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
  if (bitsize <= POINTER_SIZE)
    switch (TREE_CODE (type))
      {
      case BOOLEAN_TYPE:
      case ENUMERAL_TYPE:
      case INTEGER_TYPE:
        return fold_build1 (NOP_EXPR, pointer_sized_int_node, t);
      case REAL_TYPE:
        {
          tree itype = build_nonstandard_integer_type (bitsize, true);
          t = fold_build1 (VIEW_CONVERT_EXPR, itype, t);
          return fold_convert (pointer_sized_int_node, t);
        }
      default:
        gcc_unreachable ();
      }
  else
    {
      if (!DECL_P (t) || !TREE_ADDRESSABLE (t))
        {
          /* The reason for this is that we don't want to pessimize
             code by making vars unnecessarily addressable.  */
          tree var = create_tmp_var (type, NULL);
          tree tem = build2 (MODIFY_EXPR, void_type_node, var, t);
          if (in_expand_p)
            {
              rtx mem
                = assign_stack_temp_for_type (TYPE_MODE (type),
                                              GET_MODE_SIZE (TYPE_MODE (type)),
                                              type);
              SET_DECL_RTL (var, mem);
              expand_assignment (var, t, false);
              return build_fold_addr_expr (var);
            }
          t = build_fold_addr_expr (var);
          return build2 (COMPOUND_EXPR, TREE_TYPE (t), tem, t);
        }
      else
        return build_fold_addr_expr (t);
    }
}
tree
convert (tree type, tree expr)
{
  tree e = expr;
  enum tree_code code = TREE_CODE (type);

  if (type == TREE_TYPE (expr)
      || TREE_CODE (expr) == ERROR_MARK
      || code == ERROR_MARK
      || TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
    return expr;

  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr)))
    return fold_build1 (NOP_EXPR, type, expr);
  if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
    return error_mark_node;
  if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE)
    {
      error ("void value not ignored as it ought to be");
      return error_mark_node;
    }
  if (code == VOID_TYPE)
    return build1 (CONVERT_EXPR, type, e);
  if (code == INTEGER_TYPE || code == ENUMERAL_TYPE)
    return fold (convert_to_integer (type, e));
  if (code == BOOLEAN_TYPE)
    {
      tree t = expr;

      /* If it returns a NOP_EXPR, we must fold it here to avoid
         infinite recursion between fold () and convert ().  */
      if (TREE_CODE (t) == NOP_EXPR)
        return fold_build1 (NOP_EXPR, type, TREE_OPERAND (t, 0));
      else
        return fold_build1 (NOP_EXPR, type, t);
    }
  if (code == POINTER_TYPE || code == REFERENCE_TYPE)
    return fold (convert_to_pointer (type, e));
  if (code == REAL_TYPE)
    return fold (convert_to_real (type, e));
  if (code == COMPLEX_TYPE)
    return fold (convert_to_complex (type, e));
  if (code == VECTOR_TYPE)
    return fold (convert_to_vector (type, e));

  error ("conversion to non-scalar type requested");
  return error_mark_node;
}
static tree
compute_object_offset (tree expr, tree var)
{
  enum tree_code code = PLUS_EXPR;
  tree base, off, t;

  if (expr == var)
    return size_zero_node;

  switch (TREE_CODE (expr))
    {
    case COMPONENT_REF:
      base = compute_object_offset (TREE_OPERAND (expr, 0), var);
      if (base == error_mark_node)
        return base;

      t = TREE_OPERAND (expr, 1);
      off = size_binop (PLUS_EXPR, DECL_FIELD_OFFSET (t),
                        size_int (tree_low_cst (DECL_FIELD_BIT_OFFSET (t), 1)
                                  / BITS_PER_UNIT));
      break;

    case REALPART_EXPR:
    case NOP_EXPR:
    case CONVERT_EXPR:
    case VIEW_CONVERT_EXPR:
    case NON_LVALUE_EXPR:
      return compute_object_offset (TREE_OPERAND (expr, 0), var);

    case IMAGPART_EXPR:
      base = compute_object_offset (TREE_OPERAND (expr, 0), var);
      if (base == error_mark_node)
        return base;

      off = TYPE_SIZE_UNIT (TREE_TYPE (expr));
      break;

    case ARRAY_REF:
      base = compute_object_offset (TREE_OPERAND (expr, 0), var);
      if (base == error_mark_node)
        return base;

      t = TREE_OPERAND (expr, 1);
      if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) < 0)
        {
          code = MINUS_EXPR;
          t = fold_build1 (NEGATE_EXPR, TREE_TYPE (t), t);
        }
      t = convert (sizetype, t);
      off = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (TREE_TYPE (expr)), t);
      break;

    default:
      return error_mark_node;
    }

  return size_binop (code, base, off);
}
/* Build a constant tree with type TREE_TYPE and value according to LAT.
   Return the tree, or, if it is not possible to convert such value
   to TREE_TYPE, NULL.  */

static tree
build_const_val (struct ipcp_lattice *lat, tree tree_type)
{
  tree val;

  gcc_assert (ipcp_lat_is_const (lat));
  val = lat->constant;

  if (!useless_type_conversion_p (tree_type, TREE_TYPE (val)))
    {
      if (fold_convertible_p (tree_type, val))
        return fold_build1 (NOP_EXPR, tree_type, val);
      else if (TYPE_SIZE (tree_type) == TYPE_SIZE (TREE_TYPE (val)))
        return fold_build1 (VIEW_CONVERT_EXPR, tree_type, val);
      else
        return NULL;
    }
  return val;
}
Uint
UI_From_gnu (tree Input)
{
  tree gnu_type = TREE_TYPE (Input), gnu_base, gnu_temp;
  /* UI_Base is defined so that 5 Uint digits is sufficient to hold the
     largest possible signed 64-bit value.  */
  const int Max_For_Dint = 5;
  int v[Max_For_Dint], i;
  Vector_Template temp;
  Int_Vector vec;

#if HOST_BITS_PER_WIDE_INT == 64
  /* On 64-bit hosts, tree_fits_shwi_p tells whether the input fits in a
     signed 64-bit integer.  Then a truncation tells whether it fits in a
     signed 32-bit integer.  */
  if (tree_fits_shwi_p (Input))
    {
      HOST_WIDE_INT hw_input = tree_to_shwi (Input);
      if (hw_input == (int) hw_input)
        return UI_From_Int (hw_input);
    }
  else
    return No_Uint;
#else
  /* On 32-bit hosts, tree_fits_shwi_p tells whether the input fits in a
     signed 32-bit integer.  Then a sign test tells whether it fits in a
     signed 64-bit integer.  */
  if (tree_fits_shwi_p (Input))
    return UI_From_Int (tree_to_shwi (Input));

  gcc_assert (TYPE_PRECISION (gnu_type) <= 64);
  if (TYPE_UNSIGNED (gnu_type)
      && TYPE_PRECISION (gnu_type) == 64
      && wi::neg_p (Input, SIGNED))
    return No_Uint;
#endif

  gnu_base = build_int_cst (gnu_type, UI_Base);
  gnu_temp = Input;

  for (i = Max_For_Dint - 1; i >= 0; i--)
    {
      v[i] = tree_to_shwi (fold_build1 (ABS_EXPR, gnu_type,
                                        fold_build2 (TRUNC_MOD_EXPR, gnu_type,
                                                     gnu_temp, gnu_base)));
      gnu_temp = fold_build2 (TRUNC_DIV_EXPR, gnu_type, gnu_temp, gnu_base);
    }

  temp.Low_Bound = 1;
  temp.High_Bound = Max_For_Dint;
  vec.Bounds = &temp;
  vec.Array = v;
  return Vector_To_Uint (vec, tree_int_cst_sgn (Input) < 0);
}
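/* A minimal standalone sketch (not GNAT code) of the digit-extraction
   loop above: peel off base-B digits of a 64-bit value with truncating
   mod/div, filling the array most-significant digit first.  B = 1 << 15
   matches GNAT's documented Uint base, but treat that value as an
   assumption here.  Negative inputs yield the digits of the absolute
   value, mirroring the ABS_EXPR around TRUNC_MOD_EXPR; the sign travels
   separately, as in the Vector_To_Uint call.  */
#include <stdint.h>
#include <stdlib.h>

#define UINT_BASE (1 << 15)
#define MAX_DIGITS 5            /* 5 * 15 bits >= 64 bits.  */

static void
decompose_int64 (int64_t input, int v[MAX_DIGITS])
{
  int64_t temp = input;
  for (int i = MAX_DIGITS - 1; i >= 0; i--)
    {
      /* C's % truncates toward zero, so this is |TRUNC_MOD_EXPR|.  */
      v[i] = (int) llabs (temp % UINT_BASE);
      temp /= UINT_BASE;        /* TRUNC_DIV_EXPR  */
    }
}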
/* Reallocate MEM so it has SIZE bytes of data.  This behaves like the
   following pseudo-code:

   void *
   internal_realloc (void *mem, size_t size)
   {
     if (size < 0)
       runtime_error ("Attempt to allocate a negative amount of memory.");
     res = realloc (mem, size);
     if (!res && size != 0)
       _gfortran_os_error ("Out of memory");

     if (size == 0)
       return NULL;

     return res;
   }  */

tree
gfc_call_realloc (stmtblock_t * block, tree mem, tree size)
{
  tree msg, res, negative, nonzero, zero, null_result, tmp;
  tree type = TREE_TYPE (mem);

  size = gfc_evaluate_now (size, block);

  if (TREE_TYPE (size) != TREE_TYPE (size_type_node))
    size = fold_convert (size_type_node, size);

  /* Create a variable to hold the result.  */
  res = gfc_create_var (type, NULL);

  /* size < 0 ?  */
  negative = fold_build2 (LT_EXPR, boolean_type_node, size,
                          build_int_cst (size_type_node, 0));
  msg = gfc_build_addr_expr (pchar_type_node, gfc_build_localized_cstring_const
      ("Attempt to allocate a negative amount of memory."));
  tmp = fold_build3 (COND_EXPR, void_type_node, negative,
                     build_call_expr_loc (input_location,
                                          gfor_fndecl_runtime_error, 1, msg),
                     build_empty_stmt (input_location));
  gfc_add_expr_to_block (block, tmp);

  /* Call realloc and check the result.  */
  tmp = build_call_expr_loc (input_location,
                             built_in_decls[BUILT_IN_REALLOC], 2,
                             fold_convert (pvoid_type_node, mem), size);
  gfc_add_modify (block, res, fold_convert (type, tmp));
  null_result = fold_build2 (EQ_EXPR, boolean_type_node, res,
                             build_int_cst (pvoid_type_node, 0));
  nonzero = fold_build2 (NE_EXPR, boolean_type_node, size,
                         build_int_cst (size_type_node, 0));
  null_result = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, null_result,
                             nonzero);
  msg = gfc_build_addr_expr (pchar_type_node, gfc_build_localized_cstring_const
                             ("Out of memory"));
  tmp = fold_build3 (COND_EXPR, void_type_node, null_result,
                     build_call_expr_loc (input_location,
                                          gfor_fndecl_os_error, 1, msg),
                     build_empty_stmt (input_location));
  gfc_add_expr_to_block (block, tmp);

  /* if (size == 0) then the result is NULL.  */
  tmp = fold_build2 (MODIFY_EXPR, type, res, build_int_cst (type, 0));
  zero = fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, nonzero);
  tmp = fold_build3 (COND_EXPR, void_type_node, zero, tmp,
                     build_empty_stmt (input_location));
  gfc_add_expr_to_block (block, tmp);

  return res;
}
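/* A standalone, runnable sketch of the semantics the builder above
   emits, following its own pseudo-code comment.  The "size < 0" test is
   only meaningful on the Fortran side, where the requested size arrives
   as a signed expression, so the sketch takes a signed size; the name
   internal_realloc_sketch is hypothetical, not a gfortran entry point.  */
#include <stdio.h>
#include <stdlib.h>

static void *
internal_realloc_sketch (void *mem, long long size)
{
  if (size < 0)
    {
      fprintf (stderr, "Attempt to allocate a negative amount of memory.\n");
      abort ();
    }
  void *res = realloc (mem, (size_t) size);
  if (!res && size != 0)
    {
      fprintf (stderr, "Out of memory\n");
      abort ();
    }
  /* Mirrors the emitted "if (size == 0) res = NULL" tail; realloc (mem, 0)
     may legally return either NULL or a unique pointer.  */
  if (size == 0)
    return NULL;
  return res;
}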
Uint
UI_From_gnu (tree Input)
{
  tree gnu_type = TREE_TYPE (Input), gnu_base, gnu_temp;
  /* UI_Base is defined so that 5 Uint digits is sufficient to hold the
     largest possible signed 64-bit value.  */
  const int Max_For_Dint = 5;
  int v[Max_For_Dint], i;
  Vector_Template temp;
  Int_Vector vec;

#if HOST_BITS_PER_WIDE_INT == 64
  /* On 64-bit hosts, host_integerp tells whether the input fits in a
     signed 64-bit integer.  Then a truncation tells whether it fits in a
     signed 32-bit integer.  */
  if (host_integerp (Input, 0))
    {
      HOST_WIDE_INT hw_input = TREE_INT_CST_LOW (Input);
      if (hw_input == (int) hw_input)
        return UI_From_Int (hw_input);
    }
  else
    return No_Uint;
#else
  /* On 32-bit hosts, host_integerp tells whether the input fits in a
     signed 32-bit integer.  Then a sign test tells whether it fits in a
     signed 64-bit integer.  */
  if (host_integerp (Input, 0))
    return UI_From_Int (TREE_INT_CST_LOW (Input));
  else if (TREE_INT_CST_HIGH (Input) < 0
           && TYPE_UNSIGNED (gnu_type)
           && !(TREE_CODE (gnu_type) == INTEGER_TYPE
                && TYPE_IS_SIZETYPE (gnu_type)))
    return No_Uint;
#endif

  gnu_base = build_int_cst (gnu_type, UI_Base);
  gnu_temp = Input;

  for (i = Max_For_Dint - 1; i >= 0; i--)
    {
      v[i] = tree_low_cst (fold_build1 (ABS_EXPR, gnu_type,
                                        fold_build2 (TRUNC_MOD_EXPR, gnu_type,
                                                     gnu_temp, gnu_base)),
                           0);
      gnu_temp = fold_build2 (TRUNC_DIV_EXPR, gnu_type, gnu_temp, gnu_base);
    }

  temp.Low_Bound = 1, temp.High_Bound = Max_For_Dint;
  vec.Array = v, vec.Bounds = &temp;
  return Vector_To_Uint (vec, tree_int_cst_sgn (Input) < 0);
}
tree
ubsan_encode_value (tree t)
{
  tree type = TREE_TYPE (t);
  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
      if (TYPE_PRECISION (type) <= POINTER_SIZE)
        return fold_build1 (NOP_EXPR, pointer_sized_int_node, t);
      else
        return build_fold_addr_expr (t);
    case REAL_TYPE:
      {
        unsigned int bitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
        if (bitsize <= POINTER_SIZE)
          {
            tree itype = build_nonstandard_integer_type (bitsize, true);
            t = fold_build1 (VIEW_CONVERT_EXPR, itype, t);
            return fold_convert (pointer_sized_int_node, t);
          }
        else
          {
            if (!TREE_ADDRESSABLE (t))
              {
                /* The reason for this is that we don't want to pessimize
                   code by making vars unnecessarily addressable.  */
                tree var = create_tmp_var (TREE_TYPE (t), NULL);
                tree tem = build2 (MODIFY_EXPR, void_type_node, var, t);
                t = build_fold_addr_expr (var);
                return build2 (COMPOUND_EXPR, TREE_TYPE (t), tem, t);
              }
            else
              return build_fold_addr_expr (t);
          }
      }
    default:
      gcc_unreachable ();
    }
}
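/* A standalone sketch (not the GCC implementation) of the encoding the
   function above arranges at compile time: a value that fits in a
   pointer-sized word is handed to the ubsan runtime directly, bit-cast
   for floats (the VIEW_CONVERT_EXPR step); anything wider is passed by
   address.  Assumes a 32-bit float for the memcpy width.  */
#include <stdint.h>
#include <string.h>

static uintptr_t
encode_float_value (float f)
{
  uint32_t bits;
  memcpy (&bits, &f, sizeof bits);  /* reinterpret, don't convert  */
  return (uintptr_t) bits;          /* widen to pointer-sized int  */
}

static uintptr_t
encode_wide_value (const long double *val)
{
  /* Wider than a pointer: the runtime receives the address instead.  */
  return (uintptr_t) val;
}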
tree
aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
                      bool ignore ATTRIBUTE_UNUSED)
{
  int fcode = DECL_FUNCTION_CODE (fndecl);
  tree type = TREE_TYPE (TREE_TYPE (fndecl));

  switch (fcode)
    {
      BUILTIN_VALLDI (UNOP, abs, 2)
        return fold_build1 (ABS_EXPR, type, args[0]);
      break;
      VAR1 (UNOP, floatv2si, 2, v2sf)
      VAR1 (UNOP, floatv4si, 2, v4sf)
      VAR1 (UNOP, floatv2di, 2, v2df)
        return fold_build1 (FLOAT_EXPR, type, args[0]);
    default:
      break;
    }

  return NULL_TREE;
}
tree
build_exception_object_ref (tree type)
{
  tree obj;

  /* Java only passes object via pointer and doesn't require
     adjusting.  The java object is immediately before the generic
     exception header.  */
  obj = build_exception_object_var ();
  obj = fold_convert (build_pointer_type (type), obj);
  obj = fold_build_pointer_plus (obj,
                fold_build1 (NEGATE_EXPR, sizetype,
                             TYPE_SIZE_UNIT (TREE_TYPE (obj))));
  obj = build1 (INDIRECT_REF, type, obj);
  return obj;
}
tree
gfc_build_addr_expr (tree type, tree t)
{
  tree base_type = TREE_TYPE (t);
  tree natural_type;

  if (type && POINTER_TYPE_P (type)
      && TREE_CODE (base_type) == ARRAY_TYPE
      && TYPE_MAIN_VARIANT (TREE_TYPE (type))
         == TYPE_MAIN_VARIANT (TREE_TYPE (base_type)))
    {
      tree min_val = size_zero_node;
      tree type_domain = TYPE_DOMAIN (base_type);
      if (type_domain && TYPE_MIN_VALUE (type_domain))
        min_val = TYPE_MIN_VALUE (type_domain);
      t = fold (build4 (ARRAY_REF, TREE_TYPE (type),
                        t, min_val, NULL_TREE, NULL_TREE));
      natural_type = type;
    }
  else
    natural_type = build_pointer_type (base_type);

  if (TREE_CODE (t) == INDIRECT_REF)
    {
      if (!type)
        type = natural_type;
      t = TREE_OPERAND (t, 0);
      natural_type = TREE_TYPE (t);
    }
  else
    {
      tree base = get_base_address (t);
      if (base && DECL_P (base))
        TREE_ADDRESSABLE (base) = 1;
      t = fold_build1 (ADDR_EXPR, natural_type, t);
    }

  if (type && natural_type != type)
    t = convert (type, t);

  return t;
}
static tree
add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
                 aff_tree *comb)
{
  enum tree_code code;

  scale = double_int_ext_for_comb (scale, comb);
  elt = fold_convert (type, elt);

  if (double_int_one_p (scale))
    {
      if (!expr)
        return elt;

      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (double_int_minus_one_p (scale))
    {
      if (!expr)
        return fold_build1 (NEGATE_EXPR, type, elt);

      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_build2 (MULT_EXPR, type, elt,
                        double_int_to_tree (type, scale));

  if (double_int_negative_p (scale))
    {
      code = MINUS_EXPR;
      scale = double_int_neg (scale);
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type, elt,
                     double_int_to_tree (type, scale));
  return fold_build2 (code, type, expr, elt);
}
static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in)
{
  enum tree_code code;
  widest_int scale = wide_int_ext_for_comb (scale_in, type);

  elt = fold_convert (type, elt);
  if (scale == 1)
    {
      if (!expr)
        return elt;

      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale == -1)
    {
      if (!expr)
        return fold_build1 (NEGATE_EXPR, type, elt);

      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));
  return fold_build2 (code, type, expr, elt);
}
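/* A standalone sketch of the case analysis shared by the add_elt_to_tree
   variants above, building a textual expression instead of GIMPLE trees:
   scale 1 appends "+ elt", scale -1 appends "- elt", and any other scale
   appends "+/- |scale|*elt".  add_elt_to_string is a hypothetical helper
   for illustration only; the real code folds trees, not strings.  */
#include <stdio.h>
#include <stdlib.h>

static char *
add_elt_to_string (const char *expr, const char *elt, long scale)
{
  char *res = malloc (256);
  if (!res)
    abort ();
  if (!expr)
    {
      if (scale == 1)
        snprintf (res, 256, "%s", elt);
      else if (scale == -1)
        snprintf (res, 256, "-%s", elt);
      else
        snprintf (res, 256, "%ld*%s", scale, elt);
    }
  else if (scale == 1)
    snprintf (res, 256, "(%s + %s)", expr, elt);            /* PLUS_EXPR  */
  else if (scale == -1)
    snprintf (res, 256, "(%s - %s)", expr, elt);            /* MINUS_EXPR  */
  else
    snprintf (res, 256, "(%s %c %ld*%s)", expr,
              scale < 0 ? '-' : '+', labs (scale), elt);    /* MULT + sign  */
  return res;
}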
static void
most_expensive_mult_to_index (tree type, struct mem_address *parts,
                              aff_tree *addr, bool speed)
{
  addr_space_t as = TYPE_ADDR_SPACE (type);
  enum machine_mode address_mode = targetm.addr_space.address_mode (as);
  HOST_WIDE_INT coef;
  double_int best_mult, amult, amult_neg;
  unsigned best_mult_cost = 0, acost;
  tree mult_elt = NULL_TREE, elt;
  unsigned i, j;
  enum tree_code op_code;

  best_mult = double_int_zero;
  for (i = 0; i < addr->n; i++)
    {
      if (!double_int_fits_in_shwi_p (addr->elts[i].coef))
        continue;

      coef = double_int_to_shwi (addr->elts[i].coef);
      if (coef == 1
          || !multiplier_allowed_in_address_p (coef, TYPE_MODE (type), as))
        continue;

      acost = multiply_by_cost (coef, address_mode, speed);

      if (acost > best_mult_cost)
        {
          best_mult_cost = acost;
          best_mult = addr->elts[i].coef;
        }
    }

  if (!best_mult_cost)
    return;

  /* Collect elements multiplied by best_mult.  */
  for (i = j = 0; i < addr->n; i++)
    {
      amult = addr->elts[i].coef;
      amult_neg = double_int_ext_for_comb (double_int_neg (amult), addr);

      if (double_int_equal_p (amult, best_mult))
        op_code = PLUS_EXPR;
      else if (double_int_equal_p (amult_neg, best_mult))
        op_code = MINUS_EXPR;
      else
        {
          addr->elts[j] = addr->elts[i];
          j++;
          continue;
        }

      elt = fold_convert (sizetype, addr->elts[i].val);
      if (mult_elt)
        mult_elt = fold_build2 (op_code, sizetype, mult_elt, elt);
      else if (op_code == PLUS_EXPR)
        mult_elt = elt;
      else
        mult_elt = fold_build1 (NEGATE_EXPR, sizetype, elt);
    }
  addr->n = j;

  parts->index = mult_elt;
  parts->step = double_int_to_tree (sizetype, best_mult);
}
tree
convert (tree type, tree expr)
{
  tree e = expr;
  enum tree_code code = TREE_CODE (type);
  const char *invalid_conv_diag;

  if (type == error_mark_node
      || expr == error_mark_node
      || TREE_TYPE (expr) == error_mark_node)
    return error_mark_node;

  if ((invalid_conv_diag
       = targetm.invalid_conversion (TREE_TYPE (expr), type)))
    {
      error (invalid_conv_diag);
      return error_mark_node;
    }

  if (type == TREE_TYPE (expr))
    return expr;

  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr)))
    return fold_build1 (NOP_EXPR, type, expr);
  if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
    return error_mark_node;
  if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE)
    {
      error ("void value not ignored as it ought to be");
      return error_mark_node;
    }
  if (code == VOID_TYPE)
    return build1 (CONVERT_EXPR, type, e);
#if 0
  /* This is incorrect.  A truncation can't be stripped this way.
     Extensions will be stripped by the use of get_unwidened.  */
  if (TREE_CODE (expr) == NOP_EXPR)
    return convert (type, TREE_OPERAND (expr, 0));
#endif
  if (code == INTEGER_TYPE || code == ENUMERAL_TYPE)
    return fold (convert_to_integer (type, e));
  if (code == BOOLEAN_TYPE)
    {
      tree t = c_objc_common_truthvalue_conversion (expr);
      if (TREE_CODE (t) == ERROR_MARK)
        return t;

      /* If it returns a NOP_EXPR, we must fold it here to avoid
         infinite recursion between fold () and convert ().  */
      if (TREE_CODE (t) == NOP_EXPR)
        return fold_build1 (NOP_EXPR, type, TREE_OPERAND (t, 0));
      else
        return fold_build1 (NOP_EXPR, type, t);
    }
  if (code == POINTER_TYPE || code == REFERENCE_TYPE)
    return fold (convert_to_pointer (type, e));
  if (code == REAL_TYPE)
    return fold (convert_to_real (type, e));
  if (code == COMPLEX_TYPE)
    return fold (convert_to_complex (type, e));
  if (code == VECTOR_TYPE)
    return fold (convert_to_vector (type, e));
  if ((code == RECORD_TYPE || code == UNION_TYPE)
      && lang_hooks.types_compatible_p (type, TREE_TYPE (expr)))
    return e;

  error ("conversion to non-scalar type requested");
  return error_mark_node;
}
static tree
abs_builtin (tree method_return_type, tree orig_call)
{
  return fold_build1 (ABS_EXPR, method_return_type,
                      CALL_EXPR_ARG (orig_call, 0));
}
static tree
compute_object_offset (const_tree expr, const_tree var)
{
  enum tree_code code = PLUS_EXPR;
  tree base, off, t;

  if (expr == var)
    return size_zero_node;

  switch (TREE_CODE (expr))
    {
    case COMPONENT_REF:
      base = compute_object_offset (TREE_OPERAND (expr, 0), var);
      if (base == error_mark_node)
        return base;

      t = TREE_OPERAND (expr, 1);
      off = size_binop (PLUS_EXPR, DECL_FIELD_OFFSET (t),
                        size_int (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (t))
                                  / BITS_PER_UNIT));
      break;

    case REALPART_EXPR:
    CASE_CONVERT:
    case VIEW_CONVERT_EXPR:
    case NON_LVALUE_EXPR:
      return compute_object_offset (TREE_OPERAND (expr, 0), var);

    case IMAGPART_EXPR:
      base = compute_object_offset (TREE_OPERAND (expr, 0), var);
      if (base == error_mark_node)
        return base;

      off = TYPE_SIZE_UNIT (TREE_TYPE (expr));
      break;

    case ARRAY_REF:
      base = compute_object_offset (TREE_OPERAND (expr, 0), var);
      if (base == error_mark_node)
        return base;

      t = TREE_OPERAND (expr, 1);
      tree low_bound, unit_size;
      low_bound = array_ref_low_bound (CONST_CAST_TREE (expr));
      unit_size = array_ref_element_size (CONST_CAST_TREE (expr));
      if (! integer_zerop (low_bound))
        t = fold_build2 (MINUS_EXPR, TREE_TYPE (t), t, low_bound);
      if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) < 0)
        {
          code = MINUS_EXPR;
          t = fold_build1 (NEGATE_EXPR, TREE_TYPE (t), t);
        }
      t = fold_convert (sizetype, t);
      off = size_binop (MULT_EXPR, unit_size, t);
      break;

    case MEM_REF:
      gcc_assert (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR);
      return wide_int_to_tree (sizetype, mem_ref_offset (expr));

    default:
      return error_mark_node;
    }

  return size_binop (code, base, off);
}
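/* A standalone numeric sketch of the ARRAY_REF arm above: the byte
   offset of a[i] is (i - low_bound) * element_size, and a negative
   constant index flips the outer size_binop to MINUS_EXPR so the
   computation can stay in unsigned sizetype arithmetic.  Plain host
   integers stand in for the INTEGER_CST trees.  */
#include <stdint.h>

static uint64_t
array_ref_offset_sketch (uint64_t base, int64_t index, int64_t low_bound,
                         uint64_t unit_size)
{
  int64_t t = index - low_bound;   /* bias by the array's lower bound  */
  if (t < 0)
    return base - (uint64_t) (-t) * unit_size;   /* MINUS_EXPR arm  */
  return base + (uint64_t) t * unit_size;        /* PLUS_EXPR arm  */
}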
static tree
gfc_trans_omp_atomic (gfc_code *code)
{
  gfc_se lse;
  gfc_se rse;
  gfc_expr *expr2, *e;
  gfc_symbol *var;
  stmtblock_t block;
  tree lhsaddr, type, rhs, x;
  enum tree_code op = ERROR_MARK;
  bool var_on_left = false;

  code = code->block->next;
  gcc_assert (code->op == EXEC_ASSIGN);
  gcc_assert (code->next == NULL);
  var = code->expr1->symtree->n.sym;

  gfc_init_se (&lse, NULL);
  gfc_init_se (&rse, NULL);
  gfc_start_block (&block);

  gfc_conv_expr (&lse, code->expr1);
  gfc_add_block_to_block (&block, &lse.pre);
  type = TREE_TYPE (lse.expr);
  lhsaddr = gfc_build_addr_expr (NULL, lse.expr);

  expr2 = code->expr2;
  if (expr2->expr_type == EXPR_FUNCTION
      && expr2->value.function.isym->id == GFC_ISYM_CONVERSION)
    expr2 = expr2->value.function.actual->expr;

  if (expr2->expr_type == EXPR_OP)
    {
      gfc_expr *e;
      switch (expr2->value.op.op)
        {
        case INTRINSIC_PLUS:
          op = PLUS_EXPR;
          break;
        case INTRINSIC_TIMES:
          op = MULT_EXPR;
          break;
        case INTRINSIC_MINUS:
          op = MINUS_EXPR;
          break;
        case INTRINSIC_DIVIDE:
          if (expr2->ts.type == BT_INTEGER)
            op = TRUNC_DIV_EXPR;
          else
            op = RDIV_EXPR;
          break;
        case INTRINSIC_AND:
          op = TRUTH_ANDIF_EXPR;
          break;
        case INTRINSIC_OR:
          op = TRUTH_ORIF_EXPR;
          break;
        case INTRINSIC_EQV:
          op = EQ_EXPR;
          break;
        case INTRINSIC_NEQV:
          op = NE_EXPR;
          break;
        default:
          gcc_unreachable ();
        }
      e = expr2->value.op.op1;
      if (e->expr_type == EXPR_FUNCTION
          && e->value.function.isym->id == GFC_ISYM_CONVERSION)
        e = e->value.function.actual->expr;
      if (e->expr_type == EXPR_VARIABLE
          && e->symtree != NULL
          && e->symtree->n.sym == var)
        {
          expr2 = expr2->value.op.op2;
          var_on_left = true;
        }
      else
        {
          e = expr2->value.op.op2;
          if (e->expr_type == EXPR_FUNCTION
              && e->value.function.isym->id == GFC_ISYM_CONVERSION)
            e = e->value.function.actual->expr;
          gcc_assert (e->expr_type == EXPR_VARIABLE
                      && e->symtree != NULL
                      && e->symtree->n.sym == var);
          expr2 = expr2->value.op.op1;
          var_on_left = false;
        }
      gfc_conv_expr (&rse, expr2);
      gfc_add_block_to_block (&block, &rse.pre);
    }
  else
    {
      gcc_assert (expr2->expr_type == EXPR_FUNCTION);
      switch (expr2->value.function.isym->id)
        {
        case GFC_ISYM_MIN:
          op = MIN_EXPR;
          break;
        case GFC_ISYM_MAX:
          op = MAX_EXPR;
          break;
        case GFC_ISYM_IAND:
          op = BIT_AND_EXPR;
          break;
        case GFC_ISYM_IOR:
          op = BIT_IOR_EXPR;
          break;
        case GFC_ISYM_IEOR:
          op = BIT_XOR_EXPR;
          break;
        default:
          gcc_unreachable ();
        }
      e = expr2->value.function.actual->expr;
      gcc_assert (e->expr_type == EXPR_VARIABLE
                  && e->symtree != NULL
                  && e->symtree->n.sym == var);

      gfc_conv_expr (&rse, expr2->value.function.actual->next->expr);
      gfc_add_block_to_block (&block, &rse.pre);
      if (expr2->value.function.actual->next->next != NULL)
        {
          tree accum = gfc_create_var (TREE_TYPE (rse.expr), NULL);
          gfc_actual_arglist *arg;

          gfc_add_modify (&block, accum, rse.expr);
          for (arg = expr2->value.function.actual->next->next; arg;
               arg = arg->next)
            {
              gfc_init_block (&rse.pre);
              gfc_conv_expr (&rse, arg->expr);
              gfc_add_block_to_block (&block, &rse.pre);
              x = fold_build2 (op, TREE_TYPE (accum), accum, rse.expr);
              gfc_add_modify (&block, accum, x);
            }

          rse.expr = accum;
        }

      expr2 = expr2->value.function.actual->next->expr;
    }

  lhsaddr = save_expr (lhsaddr);
  rhs = gfc_evaluate_now (rse.expr, &block);
  x = convert (TREE_TYPE (rhs),
               build_fold_indirect_ref_loc (input_location, lhsaddr));

  if (var_on_left)
    x = fold_build2 (op, TREE_TYPE (rhs), x, rhs);
  else
    x = fold_build2 (op, TREE_TYPE (rhs), rhs, x);

  if (TREE_CODE (TREE_TYPE (rhs)) == COMPLEX_TYPE
      && TREE_CODE (type) != COMPLEX_TYPE)
    x = fold_build1 (REALPART_EXPR, TREE_TYPE (TREE_TYPE (rhs)), x);

  x = build2_v (OMP_ATOMIC, lhsaddr, convert (type, x));
  gfc_add_expr_to_block (&block, x);

  gfc_add_block_to_block (&block, &lse.pre);
  gfc_add_block_to_block (&block, &rse.pre);

  return gfc_finish_block (&block);
}
tree
convert (tree type, tree expr)
{
  tree e = expr;
  enum tree_code code = TREE_CODE (type);
  const char *invalid_conv_diag;
  tree ret;
  location_t loc = EXPR_LOCATION (expr);

  if (type == error_mark_node
      || expr == error_mark_node
      || TREE_TYPE (expr) == error_mark_node)
    return error_mark_node;

  if ((invalid_conv_diag
       = targetm.invalid_conversion (TREE_TYPE (expr), type)))
    {
      error (invalid_conv_diag);
      return error_mark_node;
    }

  if (type == TREE_TYPE (expr))
    return expr;
  ret = targetm.convert_to_type (type, expr);
  if (ret)
    return ret;

  STRIP_TYPE_NOPS (e);

  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr))
      && (TREE_CODE (TREE_TYPE (expr)) != COMPLEX_TYPE
          || TREE_CODE (e) == COMPLEX_EXPR))
    return fold_convert_loc (loc, type, expr);
  if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
    return error_mark_node;
  if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE)
    {
      error ("void value not ignored as it ought to be");
      return error_mark_node;
    }

  switch (code)
    {
    case VOID_TYPE:
      return fold_convert_loc (loc, type, e);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
      ret = convert_to_integer (type, e);
      goto maybe_fold;

    case BOOLEAN_TYPE:
      return fold_convert_loc
        (loc, type, c_objc_common_truthvalue_conversion (input_location, expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      ret = convert_to_pointer (type, e);
      goto maybe_fold;

    case REAL_TYPE:
      ret = convert_to_real (type, e);
      goto maybe_fold;

    case FIXED_POINT_TYPE:
      ret = convert_to_fixed (type, e);
      goto maybe_fold;

    case COMPLEX_TYPE:
      /* If converting from COMPLEX_TYPE to a different COMPLEX_TYPE
         and e is not COMPLEX_EXPR, convert_to_complex uses save_expr,
         but for the C FE c_save_expr needs to be called instead.  */
      if (TREE_CODE (TREE_TYPE (e)) == COMPLEX_TYPE)
        {
          if (TREE_CODE (e) != COMPLEX_EXPR)
            {
              tree subtype = TREE_TYPE (type);
              tree elt_type = TREE_TYPE (TREE_TYPE (e));

              if (in_late_binary_op)
                e = save_expr (e);
              else
                e = c_save_expr (e);
              ret
                = fold_build2_loc (loc, COMPLEX_EXPR, type,
                                   convert (subtype,
                                            fold_build1 (REALPART_EXPR,
                                                         elt_type, e)),
                                   convert (subtype,
                                            fold_build1 (IMAGPART_EXPR,
                                                         elt_type, e)));
              goto maybe_fold;
            }
        }
      ret = convert_to_complex (type, e);
      goto maybe_fold;

    case VECTOR_TYPE:
      ret = convert_to_vector (type, e);
      goto maybe_fold;

    case RECORD_TYPE:
    case UNION_TYPE:
      if (lang_hooks.types_compatible_p (type, TREE_TYPE (expr)))
        return e;
      break;

    default:
      break;

    maybe_fold:
      if (TREE_CODE (ret) != C_MAYBE_CONST_EXPR)
        ret = fold (ret);
      return ret;
    }

  error ("conversion to non-scalar type requested");
  return error_mark_node;
}
static tree
add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
                 aff_tree *comb)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
#ifdef TARGET_POINTER_SIZETYPE
    {
      /* sizetype is not good enough for pointers in ADDRESS_SPACES
         on dsPIC; some pointers are larger than 'sizetype' (CAW) */
      type1 = TARGET_POINTER_SIZETYPE (type);
    }
#else
    type1 = sizetype;
#endif

  scale = double_int_ext_for_comb (scale, comb);
  elt = fold_convert (type1, elt);

  if (double_int_one_p (scale))
    {
      if (!expr)
        return fold_convert (type, elt);

      if (POINTER_TYPE_P (type))
        return fold_build2 (POINTER_PLUS_EXPR, type, expr, elt);
      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (double_int_minus_one_p (scale))
    {
      if (!expr)
        return fold_convert (type, fold_build1 (NEGATE_EXPR, type1, elt));

      if (POINTER_TYPE_P (type))
        {
          elt = fold_build1 (NEGATE_EXPR, type1, elt);
          return fold_build2 (POINTER_PLUS_EXPR, type, expr, elt);
        }
      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_convert (type,
                         fold_build2 (MULT_EXPR, type1, elt,
                                      double_int_to_tree (type1, scale)));

  if (double_int_negative_p (scale))
    {
      code = MINUS_EXPR;
      scale = double_int_neg (scale);
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
                     double_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (type))
    {
      if (code == MINUS_EXPR)
        elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build2 (POINTER_PLUS_EXPR, type, expr, elt);
    }
  return fold_build2 (code, type, expr, elt);
}
static bool
process_assignment (gimple stmt, gimple_stmt_iterator call, tree *m,
                    tree *a, tree *ass_var)
{
  tree op0, op1 = NULL_TREE, non_ass_var = NULL_TREE;
  tree dest = gimple_assign_lhs (stmt);
  enum tree_code code = gimple_assign_rhs_code (stmt);
  enum gimple_rhs_class rhs_class = get_gimple_rhs_class (code);
  tree src_var = gimple_assign_rhs1 (stmt);

  /* See if this is a simple copy operation of an SSA name to the function
     result.  In that case we may have a simple tail call.  Ignore type
     conversions that can never produce extra code between the function
     call and the function return.  */
  if ((rhs_class == GIMPLE_SINGLE_RHS || gimple_assign_cast_p (stmt))
      && (TREE_CODE (src_var) == SSA_NAME))
    {
      /* Reject a tailcall if the type conversion might need
         additional code.  */
      if (gimple_assign_cast_p (stmt))
        {
          if (TYPE_MODE (TREE_TYPE (dest)) != TYPE_MODE (TREE_TYPE (src_var)))
            return false;

          /* Even if the type modes are the same, if the precision of the
             type is smaller than mode's precision,
             reduce_to_bit_field_precision would generate additional code.  */
          if (INTEGRAL_TYPE_P (TREE_TYPE (dest))
              && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (dest)))
                  > TYPE_PRECISION (TREE_TYPE (dest))))
            return false;
        }

      if (src_var != *ass_var)
        return false;

      *ass_var = dest;
      return true;
    }

  switch (rhs_class)
    {
    case GIMPLE_BINARY_RHS:
      op1 = gimple_assign_rhs2 (stmt);

      /* Fall through.  */

    case GIMPLE_UNARY_RHS:
      op0 = gimple_assign_rhs1 (stmt);
      break;

    default:
      return false;
    }

  /* Accumulator optimizations will reverse the order of operations.
     We can only do that for floating-point types if we're assuming
     that addition and multiplication are associative.  */
  if (!flag_associative_math)
    if (FLOAT_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl))))
      return false;

  if (rhs_class == GIMPLE_UNARY_RHS)
    ;
  else if (op0 == *ass_var
           && (non_ass_var = independent_of_stmt_p (op1, stmt, call)))
    ;
  else if (op1 == *ass_var
           && (non_ass_var = independent_of_stmt_p (op0, stmt, call)))
    ;
  else
    return false;

  switch (code)
    {
    case PLUS_EXPR:
      *a = non_ass_var;
      *ass_var = dest;
      return true;

    case POINTER_PLUS_EXPR:
      if (op0 != *ass_var)
        return false;
      *a = non_ass_var;
      *ass_var = dest;
      return true;

    case MULT_EXPR:
      *m = non_ass_var;
      *ass_var = dest;
      return true;

    case NEGATE_EXPR:
      *m = build_minus_one_cst (TREE_TYPE (op0));
      *ass_var = dest;
      return true;

    case MINUS_EXPR:
      if (*ass_var == op0)
        *a = fold_build1 (NEGATE_EXPR, TREE_TYPE (non_ass_var), non_ass_var);
      else
        {
          *m = build_minus_one_cst (TREE_TYPE (non_ass_var));
          *a = fold_build1 (NEGATE_EXPR, TREE_TYPE (non_ass_var), non_ass_var);
        }

      *ass_var = dest;
      return true;

      /* TODO -- Handle POINTER_PLUS_EXPR.  */

    default:
      return false;
    }
}
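/* A standalone sketch of the transformation this analysis enables: a
   near-tail call like "return n * fact (n - 1)" becomes a loop carrying
   a multiplicative accumulator (the *m computed above), while additive
   adjustments feed *a; NEGATE_EXPR is modeled as a multiply by the -1
   constant from build_minus_one_cst.  Hand-written illustration, not
   compiler output.  */
static long
fact_accumulated (long n)
{
  long m = 1;                /* multiplicative accumulator  */
  while (n > 1)
    {
      m *= n;                /* the MULT_EXPR folded into the loop  */
      n -= 1;
    }
  return m;
}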
tree
ubsan_instrument_shift (location_t loc, enum tree_code code,
                        tree op0, tree op1)
{
  tree t, tt = NULL_TREE;
  tree type0 = TREE_TYPE (op0);
  tree type1 = TREE_TYPE (op1);
  if (!INTEGRAL_TYPE_P (type0))
    return NULL_TREE;

  tree op1_utype = unsigned_type_for (type1);
  HOST_WIDE_INT op0_prec = TYPE_PRECISION (type0);
  tree uprecm1 = build_int_cst (op1_utype, op0_prec - 1);

  op0 = unshare_expr (op0);
  op1 = unshare_expr (op1);

  t = fold_convert_loc (loc, op1_utype, op1);
  t = fold_build2 (GT_EXPR, boolean_type_node, t, uprecm1);

  /* If this is not a signed operation, don't perform overflow checks.
     Also punt on bit-fields.  */
  if (TYPE_OVERFLOW_WRAPS (type0)
      || GET_MODE_BITSIZE (TYPE_MODE (type0)) != TYPE_PRECISION (type0)
      || (flag_sanitize & SANITIZE_SHIFT_BASE) == 0)
    ;

  /* For signed x << y, in C99/C11, the following:
     (unsigned) x >> (uprecm1 - y)
     if non-zero, is undefined.  */
  else if (code == LSHIFT_EXPR && flag_isoc99 && cxx_dialect < cxx11)
    {
      tree x = fold_build2 (MINUS_EXPR, op1_utype, uprecm1,
                            fold_convert (op1_utype, unshare_expr (op1)));
      tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
      tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
      tt = fold_build2 (NE_EXPR, boolean_type_node, tt,
                        build_int_cst (TREE_TYPE (tt), 0));
    }

  /* For signed x << y, in C++11 and later, the following:
     x < 0 || ((unsigned) x >> (uprecm1 - y))
     if > 1, is undefined.  */
  else if (code == LSHIFT_EXPR && cxx_dialect >= cxx11)
    {
      tree x = fold_build2 (MINUS_EXPR, op1_utype, uprecm1,
                            fold_convert (op1_utype, unshare_expr (op1)));
      tt = fold_convert_loc (loc, unsigned_type_for (type0),
                             unshare_expr (op0));
      tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
      tt = fold_build2 (GT_EXPR, boolean_type_node, tt,
                        build_int_cst (TREE_TYPE (tt), 1));
      x = fold_build2 (LT_EXPR, boolean_type_node, unshare_expr (op0),
                       build_int_cst (type0, 0));
      tt = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, x, tt);
    }

  /* If the condition was folded to 0, no need to instrument
     this expression.  */
  if (integer_zerop (t) && (tt == NULL_TREE || integer_zerop (tt)))
    return NULL_TREE;

  /* In case we have a SAVE_EXPR in a conditional context, we need to
     make sure it gets evaluated before the condition.  */
  t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), unshare_expr (op0), t);

  enum sanitize_code recover_kind = SANITIZE_SHIFT_EXPONENT;
  tree else_t = void_node;
  if (tt)
    {
      if ((flag_sanitize & SANITIZE_SHIFT_EXPONENT) == 0)
        {
          t = fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, t);
          t = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, t, tt);
          recover_kind = SANITIZE_SHIFT_BASE;
        }
      else
        {
          if (flag_sanitize_undefined_trap_on_error
              || ((!(flag_sanitize_recover & SANITIZE_SHIFT_EXPONENT))
                  == (!(flag_sanitize_recover & SANITIZE_SHIFT_BASE))))
            t = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, t, tt);
          else
            else_t = tt;
        }
    }

  if (flag_sanitize_undefined_trap_on_error)
    tt = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
  else
    {
      tree data = ubsan_create_data ("__ubsan_shift_data", 1, &loc,
                                     ubsan_type_descriptor (type0),
                                     ubsan_type_descriptor (type1),
                                     NULL_TREE, NULL_TREE);
      data = build_fold_addr_expr_loc (loc, data);

      enum built_in_function bcode
        = (flag_sanitize_recover & recover_kind)
          ? BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS
          : BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT;
      tt = builtin_decl_explicit (bcode);
      op0 = unshare_expr (op0);
      op1 = unshare_expr (op1);
      tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
                                ubsan_encode_value (op1));
      if (else_t != void_node)
        {
          bcode = (flag_sanitize_recover & SANITIZE_SHIFT_BASE)
                  ? BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS
                  : BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT;
          tree else_tt = builtin_decl_explicit (bcode);
          op0 = unshare_expr (op0);
          op1 = unshare_expr (op1);
          else_tt = build_call_expr_loc (loc, else_tt, 3, data,
                                         ubsan_encode_value (op0),
                                         ubsan_encode_value (op1));
          else_t = fold_build3 (COND_EXPR, void_type_node, else_t,
                                else_tt, void_node);
        }
    }
  t = fold_build3 (COND_EXPR, void_type_node, t, tt, else_t);

  return t;
}
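/* A standalone sketch of the two predicates instrumented above, for a
   signed 32-bit "x << y": the exponent check fires when y is out of
   range, and the base check uses the C99 and C++11 formulas quoted in
   the comments.  Returns 0 when the shift is defined under the given
   dialect; the function name and return encoding are illustrative.  */
#include <stdint.h>
#include <stdbool.h>

static int
shift_ub_kind (int32_t x, int32_t y, bool cxx11_rules)
{
  const unsigned prec = 32;
  /* y > uprecm1, with negative y caught by the unsigned conversion.  */
  if ((uint32_t) y > prec - 1)
    return 1;                                /* SANITIZE_SHIFT_EXPONENT  */
  uint32_t rest = (uint32_t) x >> (prec - 1 - (uint32_t) y);
  if (!cxx11_rules && rest != 0)
    return 2;                                /* C99: must not shift past sign  */
  if (cxx11_rules && (x < 0 || rest > 1))
    return 2;                                /* C++11: x < 0 or overflow  */
  return 0;
}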
static bool
forward_propagate_addr_expr_1 (tree name, tree def_rhs,
                               gimple_stmt_iterator *use_stmt_gsi,
                               bool single_use_p)
{
  tree lhs, rhs, rhs2, array_ref;
  tree *rhsp, *lhsp;
  gimple use_stmt = gsi_stmt (*use_stmt_gsi);
  enum tree_code rhs_code;

  gcc_assert (TREE_CODE (def_rhs) == ADDR_EXPR);

  lhs = gimple_assign_lhs (use_stmt);
  rhs_code = gimple_assign_rhs_code (use_stmt);
  rhs = gimple_assign_rhs1 (use_stmt);

  /* Trivial cases.  The use statement could be a trivial copy or a
     useless conversion.  Recurse to the uses of the lhs as copyprop does
     not copy through different variant pointers and FRE does not catch
     all useless conversions.  Treat the case of a single-use name and
     a conversion to def_rhs type separate, though.  */
  if (TREE_CODE (lhs) == SSA_NAME
      && ((rhs_code == SSA_NAME && rhs == name)
          || CONVERT_EXPR_CODE_P (rhs_code)))
    {
      /* Only recurse if we don't deal with a single use or we cannot
         do the propagation to the current statement.  In particular
         we can end up with a conversion needed for a non-invariant
         address which we cannot do in a single statement.  */
      if (!single_use_p
          || (!useless_type_conversion_p (TREE_TYPE (lhs),
                                          TREE_TYPE (def_rhs))
              && !is_gimple_min_invariant (def_rhs)))
        return forward_propagate_addr_expr (lhs, def_rhs);

      gimple_assign_set_rhs1 (use_stmt, unshare_expr (def_rhs));
      if (useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs)))
        gimple_assign_set_rhs_code (use_stmt, TREE_CODE (def_rhs));
      else
        gimple_assign_set_rhs_code (use_stmt, NOP_EXPR);
      return true;
    }

  /* Now strip away any outer COMPONENT_REF/ARRAY_REF nodes from the LHS.
     ADDR_EXPR will not appear on the LHS.  */
  lhsp = gimple_assign_lhs_ptr (use_stmt);
  while (handled_component_p (*lhsp))
    lhsp = &TREE_OPERAND (*lhsp, 0);
  lhs = *lhsp;

  /* Now see if the LHS node is an INDIRECT_REF using NAME.  If so,
     propagate the ADDR_EXPR into the use of NAME and fold the result.  */
  if (TREE_CODE (lhs) == INDIRECT_REF
      && TREE_OPERAND (lhs, 0) == name
      && may_propagate_address_into_dereference (def_rhs, lhs)
      && (lhsp != gimple_assign_lhs_ptr (use_stmt)
          || useless_type_conversion_p (TREE_TYPE (TREE_OPERAND (def_rhs, 0)),
                                        TREE_TYPE (rhs))))
    {
      *lhsp = unshare_expr (TREE_OPERAND (def_rhs, 0));
      fold_stmt_inplace (use_stmt);
      tidy_after_forward_propagate_addr (use_stmt);

      /* Continue propagating into the RHS if this was not the only use.  */
      if (single_use_p)
        return true;
    }

  /* Strip away any outer COMPONENT_REF, ARRAY_REF or ADDR_EXPR
     nodes from the RHS.  */
  rhsp = gimple_assign_rhs1_ptr (use_stmt);
  while (handled_component_p (*rhsp)
         || TREE_CODE (*rhsp) == ADDR_EXPR)
    rhsp = &TREE_OPERAND (*rhsp, 0);
  rhs = *rhsp;

  /* Now see if the RHS node is an INDIRECT_REF using NAME.  If so,
     propagate the ADDR_EXPR into the use of NAME and fold the result.  */
  if (TREE_CODE (rhs) == INDIRECT_REF
      && TREE_OPERAND (rhs, 0) == name
      && may_propagate_address_into_dereference (def_rhs, rhs))
    {
      *rhsp = unshare_expr (TREE_OPERAND (def_rhs, 0));
      fold_stmt_inplace (use_stmt);
      tidy_after_forward_propagate_addr (use_stmt);
      return true;
    }

  /* Now see if the RHS node is an INDIRECT_REF using NAME.  If so,
     propagate the ADDR_EXPR into the use of NAME and try to
     create a VCE and fold the result.  */
  if (TREE_CODE (rhs) == INDIRECT_REF
      && TREE_OPERAND (rhs, 0) == name
      && TYPE_SIZE (TREE_TYPE (rhs))
      && TYPE_SIZE (TREE_TYPE (TREE_OPERAND (def_rhs, 0)))
      /* Function decls should not be used for VCE either as it could be a
         function descriptor that we want and not the actual function code.  */
      && TREE_CODE (TREE_OPERAND (def_rhs, 0)) != FUNCTION_DECL
      /* We should not convert volatile loads to non volatile loads.  */
      && !TYPE_VOLATILE (TREE_TYPE (rhs))
      && !TYPE_VOLATILE (TREE_TYPE (TREE_OPERAND (def_rhs, 0)))
      && operand_equal_p (TYPE_SIZE (TREE_TYPE (rhs)),
                          TYPE_SIZE (TREE_TYPE (TREE_OPERAND (def_rhs, 0))),
                          0))
    {
      tree def_rhs_base, new_rhs = unshare_expr (TREE_OPERAND (def_rhs, 0));
      new_rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (rhs), new_rhs);
      if (TREE_CODE (new_rhs) != VIEW_CONVERT_EXPR)
        {
          /* If we have folded the VIEW_CONVERT_EXPR then the result is only
             valid if we can replace the whole rhs of the use statement.  */
          if (rhs != gimple_assign_rhs1 (use_stmt))
            return false;
          new_rhs = force_gimple_operand_gsi (use_stmt_gsi, new_rhs, true,
                                              NULL, true, GSI_NEW_STMT);
          gimple_assign_set_rhs1 (use_stmt, new_rhs);
          tidy_after_forward_propagate_addr (use_stmt);
          return true;
        }
      /* If the defining rhs comes from an indirect reference, then do not
         convert into a VIEW_CONVERT_EXPR.  */
      def_rhs_base = TREE_OPERAND (def_rhs, 0);
      while (handled_component_p (def_rhs_base))
        def_rhs_base = TREE_OPERAND (def_rhs_base, 0);
      if (!INDIRECT_REF_P (def_rhs_base))
        {
          /* We may have arbitrary VIEW_CONVERT_EXPRs in a nested component
             reference.  Place it there and fold the thing.  */
          *rhsp = new_rhs;
          fold_stmt_inplace (use_stmt);
          tidy_after_forward_propagate_addr (use_stmt);
          return true;
        }
    }

  /* If the use of the ADDR_EXPR is not a POINTER_PLUS_EXPR, there
     is nothing to do.  */
  if (gimple_assign_rhs_code (use_stmt) != POINTER_PLUS_EXPR
      || gimple_assign_rhs1 (use_stmt) != name)
    return false;

  /* The remaining cases are all for turning pointer arithmetic into
     array indexing.  They only apply when we have the address of
     element zero in an array.  If that is not the case then there
     is nothing to do.  */
  array_ref = TREE_OPERAND (def_rhs, 0);
  if (TREE_CODE (array_ref) != ARRAY_REF
      || TREE_CODE (TREE_TYPE (TREE_OPERAND (array_ref, 0))) != ARRAY_TYPE
      || !integer_zerop (TREE_OPERAND (array_ref, 1)))
    return false;

  rhs2 = gimple_assign_rhs2 (use_stmt);
  /* Try to optimize &x[0] p+ C where C is a multiple of the size
     of the elements in X into &x[C/element size].  */
  if (TREE_CODE (rhs2) == INTEGER_CST)
    {
      tree new_rhs = maybe_fold_stmt_addition (gimple_expr_type (use_stmt),
                                               array_ref, rhs2);
      if (new_rhs)
        {
          gimple_assign_set_rhs_from_tree (use_stmt_gsi, new_rhs);
          use_stmt = gsi_stmt (*use_stmt_gsi);
          update_stmt (use_stmt);
          tidy_after_forward_propagate_addr (use_stmt);
          return true;
        }
    }

  /* Try to optimize &x[0] p+ OFFSET where OFFSET is defined by
     converting a multiplication of an index by the size of the
     array elements, then the result is converted into the proper
     type for the arithmetic.  */
  if (TREE_CODE (rhs2) == SSA_NAME
      /* Avoid problems with IVopts creating PLUS_EXPRs with a
         different type than their operands.  */
      && useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (def_rhs)))
    return forward_propagate_addr_into_variable_array_index (rhs2, def_rhs,
                                                             use_stmt_gsi);
  return false;
}
tree
c_finish_omp_for (location_t locus, tree decl, tree init, tree cond,
                  tree incr, tree body, tree pre_body)
{
  location_t elocus = locus;
  bool fail = false;

  if (EXPR_HAS_LOCATION (init))
    elocus = EXPR_LOCATION (init);

  /* Validate the iteration variable.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
    {
      error ("%Hinvalid type for iteration variable %qE", &elocus, decl);
      fail = true;
    }
  if (TYPE_UNSIGNED (TREE_TYPE (decl)))
    warning (0, "%Hiteration variable %qE is unsigned", &elocus, decl);

  /* In the case of "for (int i = 0...)", init will be a decl.  It should
     have a DECL_INITIAL that we can turn into an assignment.  */
  if (init == decl)
    {
      elocus = DECL_SOURCE_LOCATION (decl);

      init = DECL_INITIAL (decl);
      if (init == NULL)
        {
          error ("%H%qE is not initialized", &elocus, decl);
          init = integer_zero_node;
          fail = true;
        }

      init = build_modify_expr (decl, NOP_EXPR, init);
      SET_EXPR_LOCATION (init, elocus);
    }
  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
  gcc_assert (TREE_OPERAND (init, 0) == decl);

  if (cond == NULL_TREE)
    {
      error ("%Hmissing controlling predicate", &elocus);
      fail = true;
    }
  else
    {
      bool cond_ok = false;

      if (EXPR_HAS_LOCATION (cond))
        elocus = EXPR_LOCATION (cond);

      if (TREE_CODE (cond) == LT_EXPR
          || TREE_CODE (cond) == LE_EXPR
          || TREE_CODE (cond) == GT_EXPR
          || TREE_CODE (cond) == GE_EXPR)
        {
          tree op0 = TREE_OPERAND (cond, 0);
          tree op1 = TREE_OPERAND (cond, 1);

          /* 2.5.1.  The comparison in the condition is computed in the
             type of DECL, otherwise the behavior is undefined.

             For example:
             long n; int i;
             i < n;

             according to ISO will be evaluated as:
             (long)i < n;

             We want to force:
             i < (int)n;  */
          if (TREE_CODE (op0) == NOP_EXPR && decl == TREE_OPERAND (op0, 0))
            {
              TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
              TREE_OPERAND (cond, 1)
                = fold_build1 (NOP_EXPR, TREE_TYPE (decl),
                               TREE_OPERAND (cond, 1));
            }
          else if (TREE_CODE (op1) == NOP_EXPR
                   && decl == TREE_OPERAND (op1, 0))
            {
              TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
              TREE_OPERAND (cond, 0)
                = fold_build1 (NOP_EXPR, TREE_TYPE (decl),
                               TREE_OPERAND (cond, 0));
            }

          if (decl == TREE_OPERAND (cond, 0))
            cond_ok = true;
          else if (decl == TREE_OPERAND (cond, 1))
            {
              TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
              TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
              TREE_OPERAND (cond, 0) = decl;
              cond_ok = true;
            }
        }

      if (!cond_ok)
        {
          error ("%Hinvalid controlling predicate", &elocus);
          fail = true;
        }
    }

  if (incr == NULL_TREE)
    {
      error ("%Hmissing increment expression", &elocus);
      fail = true;
    }
  else
    {
      bool incr_ok = false;

      if (EXPR_HAS_LOCATION (incr))
        elocus = EXPR_LOCATION (incr);

      /* Check all the valid increment expressions: v++, v--, ++v, --v,
         v = v + incr, v = incr + v and v = v - incr.  */
      switch (TREE_CODE (incr))
        {
        case POSTINCREMENT_EXPR:
        case PREINCREMENT_EXPR:
        case POSTDECREMENT_EXPR:
        case PREDECREMENT_EXPR:
          incr_ok = (TREE_OPERAND (incr, 0) == decl);
          break;

        case MODIFY_EXPR:
          if (TREE_OPERAND (incr, 0) != decl)
            break;
          if (TREE_OPERAND (incr, 1) == decl)
            break;
          if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
              && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
                  || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
            incr_ok = true;
          else if (TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
                   && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
            incr_ok = true;
          else
            {
              tree t = check_omp_for_incr_expr (TREE_OPERAND (incr, 1), decl);
              if (t != error_mark_node)
                {
                  incr_ok = true;
                  t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
                  incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
                }
            }
          break;

        default:
          break;
        }
      if (!incr_ok)
        {
          error ("%Hinvalid increment expression", &elocus);
          fail = true;
        }
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (OMP_FOR);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = init;
      OMP_FOR_COND (t) = cond;
      OMP_FOR_INCR (t) = incr;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;

      SET_EXPR_LOCATION (t, locus);
      return add_stmt (t);
    }
}
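/* A standalone illustration of the 2.5.1 rule quoted above: with
   "long n; int i;", ISO C evaluates "i < n" as "(long) i < n", but the
   OpenMP binding forces "i < (int) n".  The two can disagree once n
   exceeds INT_MAX, which is exactly why the NOP_EXPR is stripped off
   the iteration variable and the cast moved onto the bound.  Assumes an
   LP64 host so that long is wider than int; the out-of-range conversion
   to int is implementation-defined (wraparound on typical targets).  */
#include <limits.h>
#include <stdio.h>

static void
compare_semantics (void)
{
  long n = (long) INT_MAX + 1;
  int i = 0;
  printf ("(long) i < n -> %d\n", (long) i < n);   /* 1  */
  printf ("i < (int) n  -> %d\n", i < (int) n);    /* 0 with wraparound  */
}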
tree
convert (tree type, tree expr)
{
  tree e = expr;
  enum tree_code code = TREE_CODE (type);
  const char *invalid_conv_diag;
  tree ret;
  location_t loc = EXPR_LOCATION (expr);

  if (type == error_mark_node
      || error_operand_p (expr))
    return error_mark_node;

  if ((invalid_conv_diag
       = targetm.invalid_conversion (TREE_TYPE (expr), type)))
    {
      error (invalid_conv_diag);
      return error_mark_node;
    }

  if (type == TREE_TYPE (expr))
    return expr;
  ret = targetm.convert_to_type (type, expr);
  if (ret)
    return ret;

  STRIP_TYPE_NOPS (e);

  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr))
      && (TREE_CODE (TREE_TYPE (expr)) != COMPLEX_TYPE
          || TREE_CODE (e) == COMPLEX_EXPR))
    return fold_convert_loc (loc, type, expr);
  if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
    return error_mark_node;
  if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE)
    {
      error ("void value not ignored as it ought to be");
      return error_mark_node;
    }

  switch (code)
    {
    case VOID_TYPE:
      return fold_convert_loc (loc, type, e);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
      if (flag_sanitize & SANITIZE_FLOAT_CAST
          && TREE_CODE (TREE_TYPE (expr)) == REAL_TYPE
          && COMPLETE_TYPE_P (type)
          && current_function_decl != NULL_TREE
          && !lookup_attribute ("no_sanitize_undefined",
                                DECL_ATTRIBUTES (current_function_decl)))
        {
          tree arg;
          if (in_late_binary_op)
            {
              expr = save_expr (expr);
              arg = expr;
            }
          else
            {
              expr = c_save_expr (expr);
              arg = c_fully_fold (expr, false, NULL);
            }
          tree check = ubsan_instrument_float_cast (loc, type, expr, arg);
          expr = fold_build1 (FIX_TRUNC_EXPR, type, expr);
          if (check == NULL)
            return expr;
          return fold_build2 (COMPOUND_EXPR, TREE_TYPE (expr), check, expr);
        }
      ret = convert_to_integer (type, e);
      goto maybe_fold;

    case BOOLEAN_TYPE:
      return fold_convert_loc
        (loc, type, c_objc_common_truthvalue_conversion (input_location, expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      ret = convert_to_pointer (type, e);
      goto maybe_fold;

    case REAL_TYPE:
      ret = convert_to_real (type, e);
      goto maybe_fold;

    case FIXED_POINT_TYPE:
      ret = convert_to_fixed (type, e);
      goto maybe_fold;

    case COMPLEX_TYPE:
      /* If converting from COMPLEX_TYPE to a different COMPLEX_TYPE
         and e is not COMPLEX_EXPR, convert_to_complex uses save_expr,
         but for the C FE c_save_expr needs to be called instead.  */
      if (TREE_CODE (TREE_TYPE (e)) == COMPLEX_TYPE)
        {
          if (TREE_CODE (e) != COMPLEX_EXPR)
            {
              tree subtype = TREE_TYPE (type);
              tree elt_type = TREE_TYPE (TREE_TYPE (e));

              if (in_late_binary_op)
                e = save_expr (e);
              else
                e = c_save_expr (e);
              ret
                = fold_build2_loc (loc, COMPLEX_EXPR, type,
                                   convert (subtype,
                                            fold_build1 (REALPART_EXPR,
                                                         elt_type, e)),
                                   convert (subtype,
                                            fold_build1 (IMAGPART_EXPR,
                                                         elt_type, e)));
              goto maybe_fold;
            }
        }
      ret = convert_to_complex (type, e);
      goto maybe_fold;

    case VECTOR_TYPE:
      ret = convert_to_vector (type, e);
      goto maybe_fold;

    case RECORD_TYPE:
    case UNION_TYPE:
      if (lang_hooks.types_compatible_p (type, TREE_TYPE (expr)))
        return e;
      break;

    default:
      break;

    maybe_fold:
      if (TREE_CODE (ret) != C_MAYBE_CONST_EXPR)
        ret = fold (ret);
      return ret;
    }

  error ("conversion to non-scalar type requested");
  return error_mark_node;
}
static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gimple phi,
                         tree arg0, tree arg1)
{
  tree result;
  gimple stmt, new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  if (TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
      || TREE_CODE (TREE_TYPE (arg1)) == COMPLEX_TYPE)
    return false;

  /* The PHI arguments have the constants 0 and 1, then convert
     it to the conditional.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    ;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge as the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier
     and less error-prone to build a tree and gimplify it on the fly though
     it is less efficient.  */
  cond = fold_build2 (gimple_cond_code (stmt), boolean_type_node,
                      gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && integer_onep (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && integer_onep (arg1)))
    cond = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      new_var2 = create_tmp_var (TREE_TYPE (result), NULL);
      add_referenced_var (new_var2);
      new_stmt = gimple_build_assign_with_ops (CONVERT_EXPR, new_var2,
                                               new_var, NULL);
      new_var2 = make_ssa_name (new_var2, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_var2);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}
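/* A standalone before/after sketch of the rewrite above: a PHI whose
   arguments are the constants 0 and 1 collapses into the condition
   itself (or its negation), removing the branch entirely.  The two
   helper functions are illustrative only.  */
static int
phi_before (int a, int b)
{
  int dest;
  if (a < b)
    dest = 1;           /* argument on the true edge  */
  else
    dest = 0;           /* argument on the false edge  */
  return dest;
}

static int
phi_after (int a, int b)
{
  return a < b;         /* dest = COND; no PHI, no branch  */
}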
static bool
ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv,
                   basic_block outer_cond_bb, bool outer_inv,
                   bool result_inv)
{
  gimple_stmt_iterator gsi;
  gimple inner_stmt, outer_stmt;
  gcond *inner_cond, *outer_cond;
  tree name1, name2, bit1, bit2, bits1, bits2;

  inner_stmt = last_stmt (inner_cond_bb);
  if (!inner_stmt
      || gimple_code (inner_stmt) != GIMPLE_COND)
    return false;
  inner_cond = as_a <gcond *> (inner_stmt);

  outer_stmt = last_stmt (outer_cond_bb);
  if (!outer_stmt
      || gimple_code (outer_stmt) != GIMPLE_COND)
    return false;
  outer_cond = as_a <gcond *> (outer_stmt);

  /* See if we test a single bit of the same name in both tests.  In
     that case remove the outer test, merging both else edges,
     and change the inner one to test for
     name & (bit1 | bit2) == (bit1 | bit2).  */
  if (recognize_single_bit_test (inner_cond, &name1, &bit1, inner_inv)
      && recognize_single_bit_test (outer_cond, &name2, &bit2, outer_inv)
      && name1 == name2)
    {
      tree t, t2;

      /* Do it.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
                       build_int_cst (TREE_TYPE (name1), 1), bit1);
      t2 = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
                        build_int_cst (TREE_TYPE (name1), 1), bit2);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), t, t2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                    true, GSI_SAME_STMT);
      t2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE,
                                     true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR,
                       boolean_type_node, t2, t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
        return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
        outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      if (dump_file)
        {
          fprintf (dump_file, "optimizing double bit test to ");
          print_generic_expr (dump_file, name1, 0);
          fprintf (dump_file, " & T == T\nwith temporary T = (1 << ");
          print_generic_expr (dump_file, bit1, 0);
          fprintf (dump_file, ") | (1 << ");
          print_generic_expr (dump_file, bit2, 0);
          fprintf (dump_file, ")\n");
        }

      return true;
    }

  /* See if we have two bit tests of the same name in both tests.
     In that case remove the outer test and change the inner one to
     test for name & (bits1 | bits2) != 0.  */
  else if (recognize_bits_test (inner_cond, &name1, &bits1, !inner_inv)
           && recognize_bits_test (outer_cond, &name2, &bits2, !outer_inv))
    {
      gimple_stmt_iterator gsi;
      tree t;

      /* Find the common name which is bit-tested.  */
      if (name1 == name2)
        ;
      else if (bits1 == bits2)
        {
          t = name2;
          name2 = bits2;
          bits2 = t;
          t = name1;
          name1 = bits1;
          bits1 = t;
        }
      else if (name1 == bits2)
        {
          t = name2;
          name2 = bits2;
          bits2 = t;
        }
      else if (bits1 == name2)
        {
          t = name1;
          name1 = bits1;
          bits1 = t;
        }
      else
        return false;

      /* As we strip non-widening conversions in finding a common
         name that is tested make sure to end up with an integral
         type for building the bit operations.  */
      if (TYPE_PRECISION (TREE_TYPE (bits1))
          >= TYPE_PRECISION (TREE_TYPE (bits2)))
        {
          bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
          name1 = fold_convert (TREE_TYPE (bits1), name1);
          bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
          bits2 = fold_convert (TREE_TYPE (bits1), bits2);
        }
      else
        {
          bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
          name1 = fold_convert (TREE_TYPE (bits2), name1);
          bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
          bits1 = fold_convert (TREE_TYPE (bits2), bits1);
        }

      /* Do it.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                    true, GSI_SAME_STMT);
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                    true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR, boolean_type_node, t,
                       build_int_cst (TREE_TYPE (t), 0));
      t = canonicalize_cond_expr_cond (t);
      if (!t)
        return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
        outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      if (dump_file)
        {
          fprintf (dump_file, "optimizing bits or bits test to ");
          print_generic_expr (dump_file, name1, 0);
          fprintf (dump_file, " & T != 0\nwith temporary T = ");
          print_generic_expr (dump_file, bits1, 0);
          fprintf (dump_file, " | ");
          print_generic_expr (dump_file, bits2, 0);
          fprintf (dump_file, "\n");
        }

      return true;
    }

  /* See if we have two comparisons that we can merge into one.  */
  else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison
           && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison)
    {
      tree t;
      enum tree_code inner_cond_code = gimple_cond_code (inner_cond);
      enum tree_code outer_cond_code = gimple_cond_code (outer_cond);

      /* Invert comparisons if necessary (and possible).  */
      if (inner_inv)
        inner_cond_code
          = invert_tree_comparison (inner_cond_code,
                                    HONOR_NANS (gimple_cond_lhs (inner_cond)));
      if (inner_cond_code == ERROR_MARK)
        return false;
      if (outer_inv)
        outer_cond_code
          = invert_tree_comparison (outer_cond_code,
                                    HONOR_NANS (gimple_cond_lhs (outer_cond)));
      if (outer_cond_code == ERROR_MARK)
        return false;
      /* Don't return false so fast, try maybe_fold_or_comparisons?  */

      if (!(t = maybe_fold_and_comparisons (inner_cond_code,
                                            gimple_cond_lhs (inner_cond),
                                            gimple_cond_rhs (inner_cond),
                                            outer_cond_code,
                                            gimple_cond_lhs (outer_cond),
                                            gimple_cond_rhs (outer_cond))))
        {
          tree t1, t2;
          gimple_stmt_iterator gsi;
          if (!LOGICAL_OP_NON_SHORT_CIRCUIT)
            return false;
          /* Only do this optimization if the inner bb contains only the
             conditional.  */
          if (!gsi_one_before_end_p (gsi_start_nondebug_after_labels_bb (inner_cond_bb)))
            return false;
          t1 = fold_build2_loc (gimple_location (inner_cond),
                                inner_cond_code,
                                boolean_type_node,
                                gimple_cond_lhs (inner_cond),
                                gimple_cond_rhs (inner_cond));
          t2 = fold_build2_loc (gimple_location (outer_cond),
                                outer_cond_code,
                                boolean_type_node,
                                gimple_cond_lhs (outer_cond),
                                gimple_cond_rhs (outer_cond));
          t = fold_build2_loc (gimple_location (inner_cond),
                               TRUTH_AND_EXPR, boolean_type_node, t1, t2);
          if (result_inv)
            {
              t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
              result_inv = false;
            }
          gsi = gsi_for_stmt (inner_cond);
          t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr, NULL,
                                          true, GSI_SAME_STMT);
        }
      if (result_inv)
        t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
        return false;
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
        outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      if (dump_file)
        {
          fprintf (dump_file, "optimizing two comparisons to ");
          print_generic_expr (dump_file, t, 0);
          fprintf (dump_file, "\n");
        }

      return true;
    }

  return false;
}
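/* A standalone before/after sketch of the double-bit-test merge in the
   first arm above: two nested single-bit tests of the same name become
   one masked compare, name & (1<<bit1 | 1<<bit2) == (1<<bit1 | 1<<bit2).
   The helper names are illustrative only.  */
static int
bits_before (unsigned name, unsigned bit1, unsigned bit2)
{
  if (name & (1u << bit1))          /* inner single-bit test  */
    if (name & (1u << bit2))        /* outer single-bit test  */
      return 1;
  return 0;
}

static int
bits_after (unsigned name, unsigned bit1, unsigned bit2)
{
  unsigned t = (1u << bit1) | (1u << bit2);   /* temporary T  */
  return (name & t) == t;                     /* name & T == T  */
}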