static inline tree
build_size_arg_loc (location_t loc, tree nb_iter, tree op,
                    gimple_seq *stmt_list)
{
  gimple_seq stmts;
  tree x = fold_build2_loc (loc, MULT_EXPR, size_type_node,
                            fold_convert_loc (loc, size_type_node, nb_iter),
                            fold_convert_loc (loc, size_type_node,
                                              TYPE_SIZE_UNIT (TREE_TYPE (op))));
  x = force_gimple_operand (x, &stmts, true, NULL);
  gimple_seq_add_seq (stmt_list, stmts);
  return x;
}
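For reference, the value this helper folds is just nb_iter * sizeof (element) in size_type, then gimplified. A standalone arithmetic sketch in plain C (hypothetical names, not the GCC tree API):

/* Sketch of the size computation performed by build_size_arg_loc:
   nb_bytes = nb_iter * sizeof (element).  Plain C stand-in.  */
#include <stdio.h>
#include <stddef.h>

static size_t
size_arg (size_t nb_iter, size_t elt_size)
{
  return nb_iter * elt_size;
}

int
main (void)
{
  /* 100 iterations over an int array: 400 bytes with 4-byte int.  */
  printf ("%zu\n", size_arg (100, sizeof (int)));
  return 0;
}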
static void
generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
                      gimple_stmt_iterator bsi)
{
  tree addr_base, nb_bytes;
  bool res = false;
  gimple_seq stmt_list = NULL, stmts;
  gimple fn_call;
  tree mem, fn;
  struct data_reference *dr = XCNEW (struct data_reference);
  location_t loc = gimple_location (stmt);

  DR_STMT (dr) = stmt;
  DR_REF (dr) = op0;
  res = dr_analyze_innermost (dr);
  gcc_assert (res && stride_of_unit_type_p (DR_STEP (dr), TREE_TYPE (op0)));

  nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
  addr_base = size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr));
  addr_base = fold_convert_loc (loc, sizetype, addr_base);

  /* Test for a negative stride, iterating over every element.  */
  if (integer_zerop (size_binop (PLUS_EXPR,
                                 TYPE_SIZE_UNIT (TREE_TYPE (op0)),
                                 fold_convert (sizetype, DR_STEP (dr)))))
    {
      addr_base = size_binop_loc (loc, MINUS_EXPR, addr_base,
                                  fold_convert_loc (loc, sizetype, nb_bytes));
      addr_base = size_binop_loc (loc, PLUS_EXPR, addr_base,
                                  TYPE_SIZE_UNIT (TREE_TYPE (op0)));
    }

  addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
                               TREE_TYPE (DR_BASE_ADDRESS (dr)),
                               DR_BASE_ADDRESS (dr), addr_base);
  mem = force_gimple_operand (addr_base, &stmts, true, NULL);
  gimple_seq_add_seq (&stmt_list, stmts);

  fn = build_fold_addr_expr (implicit_built_in_decls[BUILT_IN_MEMSET]);
  fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes);
  gimple_seq_add_stmt (&stmt_list, fn_call);
  gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "generated memset zero\n");

  free_data_ref (dr);
}
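The negative-stride branch above rebases the memset start to the low end of the written region: ref - nb_bytes + elt_size, where ref is the first accessed (highest-addressed) element. A minimal stand-alone model of that address computation, with ordinary pointers in place of data references (illustrative only):

/* Model of the negative-stride rebase in generate_memset_zero.  */
#include <stdio.h>
#include <string.h>

int
main (void)
{
  int a[8];
  memset (a, 0xff, sizeof a);
  int *ref = &a[7];                    /* base + offset + init */
  size_t nb_bytes = 8 * sizeof (int);  /* nb_iter * elt_size */
  /* start = ref - nb_bytes + elt_size, as computed above.  */
  char *start = (char *) ref - nb_bytes + sizeof (int);
  memset (start, 0, nb_bytes);         /* zeros a[0..7] */
  printf ("%d %d\n", a[0], a[7]);      /* 0 0 */
  return 0;
}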
static tree
cp_ubsan_instrument_vptr (location_t loc, tree op, tree type, bool is_addr,
                          enum ubsan_null_ckind ckind)
{
  type = TYPE_MAIN_VARIANT (type);
  const char *mangled = mangle_type_string (type);
  hashval_t str_hash1 = htab_hash_string (mangled);
  hashval_t str_hash2 = iterative_hash (mangled, strlen (mangled), 0);
  tree str_hash = wide_int_to_tree (uint64_type_node,
                                    wi::uhwi (((uint64_t) str_hash1 << 32)
                                              | str_hash2, 64));
  if (!is_addr)
    op = build_fold_addr_expr_loc (loc, op);
  op = save_expr (op);
  tree vptr = fold_build3_loc (loc, COMPONENT_REF,
                               TREE_TYPE (TYPE_VFIELD (type)),
                               build_fold_indirect_ref_loc (loc, op),
                               TYPE_VFIELD (type), NULL_TREE);
  vptr = fold_convert_loc (loc, pointer_sized_int_node, vptr);
  vptr = fold_convert_loc (loc, uint64_type_node, vptr);
  if (ckind == UBSAN_DOWNCAST_POINTER)
    {
      tree cond = build2_loc (loc, NE_EXPR, boolean_type_node, op,
                              build_zero_cst (TREE_TYPE (op)));
      /* This is a compiler generated comparison, don't emit
         e.g. -Wnonnull-compare warning for it.  */
      TREE_NO_WARNING (cond) = 1;
      vptr = build3_loc (loc, COND_EXPR, uint64_type_node, cond,
                         vptr, build_int_cst (uint64_type_node, 0));
    }
  tree ti_decl = get_tinfo_decl (type);
  mark_used (ti_decl);
  tree ptype = build_pointer_type (type);
  tree call
    = build_call_expr_internal_loc (loc, IFN_UBSAN_VPTR,
                                    void_type_node, 5, op, vptr, str_hash,
                                    build_address (ti_decl),
                                    build_int_cst (ptype, ckind));
  TREE_SIDE_EFFECTS (call) = 1;
  return fold_build2 (COMPOUND_EXPR, TREE_TYPE (op), call, op);
}
static tree
c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
{
  if (POINTER_TYPE_P (TREE_TYPE (decl))
      && TREE_OPERAND (incr, 1))
    {
      tree t = fold_convert_loc (loc, sizetype, TREE_OPERAND (incr, 1));

      if (TREE_CODE (incr) == POSTDECREMENT_EXPR
          || TREE_CODE (incr) == PREDECREMENT_EXPR)
        t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
      t = fold_build_pointer_plus (decl, t);
      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
    }
  return incr;
}
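The canonicalization above turns p++/p-- on a pointer iteration variable into p = p + ±(sizetype) step. The same transformation written out with concrete pointers (illustrative, not tree code):

/* p-- in canonical pointer-plus form: p = p + -(ptrdiff_t) sizeof (*p).  */
#include <stdio.h>
#include <stddef.h>

int
main (void)
{
  double a[4] = { 0 };
  double *p = &a[3];
  p = (double *) ((char *) p + -(ptrdiff_t) sizeof (*p));
  printf ("%td\n", p - a);   /* 2 */
  return 0;
}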
static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, MINUS_EXPR,
                                TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
                                TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
                                TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    default:
      break;
    }

  return error_mark_node;
}
tree
c_fully_fold (tree expr, bool in_init, bool *maybe_const)
{
  tree ret;
  tree eptype = NULL_TREE;
  bool dummy = true;
  bool maybe_const_itself = true;
  location_t loc = EXPR_LOCATION (expr);

  if (!maybe_const)
    maybe_const = &dummy;
  if (TREE_CODE (expr) == EXCESS_PRECISION_EXPR)
    {
      eptype = TREE_TYPE (expr);
      expr = TREE_OPERAND (expr, 0);
    }
  ret = c_fully_fold_internal (expr, in_init, maybe_const,
                               &maybe_const_itself, false);
  if (eptype)
    ret = fold_convert_loc (loc, eptype, ret);
  *maybe_const &= maybe_const_itself;
  return ret;
}
static bool
generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
                      gimple_stmt_iterator bsi)
{
  tree addr_base;
  tree nb_bytes = NULL;
  bool res = false;
  gimple_seq stmts = NULL, stmt_list = NULL;
  gimple fn_call;
  tree mem, fndecl, fntype, fn;
  gimple_stmt_iterator i;
  struct data_reference *dr = XCNEW (struct data_reference);
  location_t loc = gimple_location (stmt);

  DR_STMT (dr) = stmt;
  DR_REF (dr) = op0;
  if (!dr_analyze_innermost (dr))
    goto end;

  /* Test for a positive stride, iterating over every element.  */
  if (integer_zerop (fold_build2_loc (loc, MINUS_EXPR, integer_type_node,
                                      DR_STEP (dr),
                                      TYPE_SIZE_UNIT (TREE_TYPE (op0)))))
    {
      tree offset = fold_convert_loc (loc, sizetype,
                                      size_binop_loc (loc, PLUS_EXPR,
                                                      DR_OFFSET (dr),
                                                      DR_INIT (dr)));
      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
                                   TREE_TYPE (DR_BASE_ADDRESS (dr)),
                                   DR_BASE_ADDRESS (dr), offset);
    }

  /* Test for a negative stride, iterating over every element.  */
  else if (integer_zerop (fold_build2_loc (loc, PLUS_EXPR, integer_type_node,
                                           TYPE_SIZE_UNIT (TREE_TYPE (op0)),
                                           DR_STEP (dr))))
    {
      nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
      addr_base = size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr),
                                  DR_INIT (dr));
      addr_base = fold_build2_loc (loc, MINUS_EXPR, sizetype, addr_base,
                                   fold_convert_loc (loc, sizetype, nb_bytes));
      addr_base = force_gimple_operand (addr_base, &stmts, true, NULL);
      gimple_seq_add_seq (&stmt_list, stmts);
      addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR,
                                   TREE_TYPE (DR_BASE_ADDRESS (dr)),
                                   DR_BASE_ADDRESS (dr), addr_base);
    }
  else
    goto end;

  mem = force_gimple_operand (addr_base, &stmts, true, NULL);
  gimple_seq_add_seq (&stmt_list, stmts);

  fndecl = implicit_built_in_decls[BUILT_IN_MEMSET];
  fntype = TREE_TYPE (fndecl);
  fn = build1 (ADDR_EXPR, build_pointer_type (fntype), fndecl);

  if (!nb_bytes)
    nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list);
  fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes);
  gimple_seq_add_stmt (&stmt_list, fn_call);

  for (i = gsi_start (stmt_list); !gsi_end_p (i); gsi_next (&i))
    {
      gimple s = gsi_stmt (i);
      update_stmt_if_modified (s);
    }

  gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING);
  res = true;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "generated memset zero\n");

 end:
  free_data_ref (dr);
  return res;
}
void
omp_extract_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
                      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
                    == GF_OMP_FOR_KIND_DISTRIBUTE;
  bool taskloop = gimple_omp_for_kind (for_stmt)
                  == GF_OMP_FOR_KIND_TASKLOOP;
  tree iterv, countv;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  if (gimple_omp_for_collapse (for_stmt) > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->collapse = 1;
  fd->ordered = 0;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->sched_modifiers = 0;
  fd->chunk_size = NULL_TREE;
  fd->simd_schedule = false;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        if (OMP_CLAUSE_ORDERED_EXPR (t))
          fd->ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (t));
        break;
      case OMP_CLAUSE_SCHEDULE:
        gcc_assert (!distribute && !taskloop);
        fd->sched_kind
          = (enum omp_clause_schedule_kind)
            (OMP_CLAUSE_SCHEDULE_KIND (t) & OMP_CLAUSE_SCHEDULE_MASK);
        fd->sched_modifiers = (OMP_CLAUSE_SCHEDULE_KIND (t)
                               & ~OMP_CLAUSE_SCHEDULE_MASK);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        fd->simd_schedule = OMP_CLAUSE_SCHEDULE_SIMD (t);
        break;
      case OMP_CLAUSE_DIST_SCHEDULE:
        gcc_assert (distribute);
        fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_COLLAPSE:
        fd->collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (t));
        if (fd->collapse > 1)
          {
            collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
            collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
          }
        break;
      default:
        break;
      }

  if (fd->ordered && fd->collapse == 1 && loops != NULL)
    {
      fd->loops = loops;
      iterv = NULL_TREE;
      countv = NULL_TREE;
      collapse_iter = &iterv;
      collapse_count = &countv;
    }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (taskloop)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_RUNTIME;
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }

  int cnt = fd->ordered ? fd->ordered : fd->collapse;
  for (i = 0; i < cnt; i++)
    {
      if (i == 0 && fd->collapse == 1 && (fd->ordered == 0 || loops == NULL))
        loop = &fd->loop;
      else if (loops != NULL)
        loop = loops + i;
      else
        loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      gcc_assert (loop->cond_code != NE_EXPR
                  || gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_CILKSIMD
                  || gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_CILKFOR);
      omp_adjust_for_condition (loc, &loop->cond_code, &loop->n2);

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      loop->step = omp_get_for_step_from_incr (loc, t);

      if (simd
          || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
              && !fd->have_ordered))
        {
          if (fd->collapse == 1)
            iter_type = TREE_TYPE (loop->v);
          else if (i == 0
                   || TYPE_PRECISION (iter_type)
                      < TYPE_PRECISION (TREE_TYPE (loop->v)))
            iter_type
              = build_nonstandard_integer_type
                  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
        }
      else if (iter_type != long_long_unsigned_type_node)
        {
          if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
            iter_type = long_long_unsigned_type_node;
          else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
                   && TYPE_PRECISION (TREE_TYPE (loop->v))
                      >= TYPE_PRECISION (iter_type))
            {
              tree n;
              if (loop->cond_code == LT_EXPR)
                n = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->v),
                                     loop->n2, loop->step);
              else
                n = loop->n1;
              if (TREE_CODE (n) != INTEGER_CST
                  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
                iter_type = long_long_unsigned_type_node;
            }
          else if (TYPE_PRECISION (TREE_TYPE (loop->v))
                   > TYPE_PRECISION (iter_type))
            {
              tree n1, n2;
              if (loop->cond_code == LT_EXPR)
                {
                  n1 = loop->n1;
                  n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                }
              else
                {
                  n1 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                  n2 = loop->n1;
                }
              if (TREE_CODE (n1) != INTEGER_CST
                  || TREE_CODE (n2) != INTEGER_CST
                  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
                  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
                iter_type = long_long_unsigned_type_node;
            }
        }

      if (i >= fd->collapse)
        continue;

      if (collapse_count && *collapse_count == NULL)
        {
          t = fold_binary (loop->cond_code, boolean_type_node,
                           fold_convert (TREE_TYPE (loop->v), loop->n1),
                           fold_convert (TREE_TYPE (loop->v), loop->n2));
          if (t && integer_zerop (t))
            count = build_zero_cst (long_long_unsigned_type_node);
          else if ((i == 0 || count != NULL_TREE)
                   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                   && TREE_CONSTANT (loop->n1)
                   && TREE_CONSTANT (loop->n2)
                   && TREE_CODE (loop->step) == INTEGER_CST)
            {
              tree itype = TREE_TYPE (loop->v);

              if (POINTER_TYPE_P (itype))
                itype = signed_type_for (itype);
              t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
              t = fold_build2_loc (loc, PLUS_EXPR, itype,
                                   fold_convert_loc (loc, itype, loop->step),
                                   t);
              t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n2));
              t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n1));
              if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                                     fold_build1_loc (loc, NEGATE_EXPR,
                                                      itype, t),
                                     fold_build1_loc (loc, NEGATE_EXPR, itype,
                                                      fold_convert_loc
                                                        (loc, itype,
                                                         loop->step)));
              else
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                                     fold_convert_loc (loc, itype,
                                                       loop->step));
              t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
              if (count != NULL_TREE)
                count = fold_build2_loc (loc, MULT_EXPR,
                                         long_long_unsigned_type_node,
                                         count, t);
              else
                count = t;
              if (TREE_CODE (count) != INTEGER_CST)
                count = NULL_TREE;
            }
          else if (count && !integer_zerop (count))
            count = NULL_TREE;
        }
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
        iter_type = long_long_unsigned_type_node;
      else
        iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
        *collapse_count = fold_convert_loc (loc, iter_type, count);
      else
        *collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1 || (fd->ordered && loops))
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
  else if (loops)
    loops[0] = fd->loop;
}
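The constant trip-count computation above folds count = (step + adjust + n2 - n1) / step, with adjust = -1 for < loops and +1 for > loops (the negate/negate pair handles the unsigned GT case). A worked scalar version of the same formula (hypothetical helper, not part of GCC):

/* count = (step + adjust + n2 - n1) / step.
   "for (i = 0; i < 10; i += 3)"  ->  (3 - 1 + 10 - 0) / 3   = 4
   "for (i = 10; i > 0; i -= 3)"  ->  (-3 + 1 + 0 - 10) / -3 = 4  */
#include <stdio.h>

static long
trip_count (long n1, long n2, long step, int cond_is_lt)
{
  long adjust = cond_is_lt ? -1 : 1;
  return (step + adjust + n2 - n1) / step;
}

int
main (void)
{
  printf ("%ld\n", trip_count (0, 10, 3, 1));    /* 4 */
  printf ("%ld\n", trip_count (10, 0, -3, 0));   /* 4 */
  return 0;
}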
static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, MINUS_EXPR,
                                TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
                                TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
                                TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
        /* cp_build_modify_expr forces preevaluation of the RHS to make
           sure that it is evaluated before the lvalue-rvalue conversion
           is applied to the LHS.  Reconstruct the original expression.  */
        tree op0 = TREE_OPERAND (exp, 0);
        if (TREE_CODE (op0) == TARGET_EXPR
            && !VOID_TYPE_P (TREE_TYPE (op0)))
          {
            tree op1 = TREE_OPERAND (exp, 1);
            tree temp = TARGET_EXPR_SLOT (op0);
            if (BINARY_CLASS_P (op1)
                && TREE_OPERAND (op1, 1) == temp)
              {
                op1 = copy_node (op1);
                TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
                return check_omp_for_incr_expr (loc, op1, decl);
              }
          }
        break;
      }
    default:
      break;
    }

  return error_mark_node;
}
tree
convert (tree type, tree expr)
{
  tree e = expr;
  enum tree_code code = TREE_CODE (type);
  const char *invalid_conv_diag;
  tree ret;
  location_t loc = EXPR_LOCATION (expr);

  if (type == error_mark_node
      || expr == error_mark_node
      || TREE_TYPE (expr) == error_mark_node)
    return error_mark_node;

  if ((invalid_conv_diag
       = targetm.invalid_conversion (TREE_TYPE (expr), type)))
    {
      error (invalid_conv_diag);
      return error_mark_node;
    }

  if (type == TREE_TYPE (expr))
    return expr;
  ret = targetm.convert_to_type (type, expr);
  if (ret)
    return ret;

  STRIP_TYPE_NOPS (e);

  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr)))
    return fold_convert_loc (loc, type, expr);
  if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
    return error_mark_node;
  if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE)
    {
      error ("void value not ignored as it ought to be");
      return error_mark_node;
    }

  switch (code)
    {
    case VOID_TYPE:
      return fold_convert_loc (loc, type, e);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
      ret = convert_to_integer (type, e);
      goto maybe_fold;

    case BOOLEAN_TYPE:
      return fold_convert_loc
        (loc, type,
         c_objc_common_truthvalue_conversion (input_location, expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      ret = convert_to_pointer (type, e);
      goto maybe_fold;

    case REAL_TYPE:
      ret = convert_to_real (type, e);
      goto maybe_fold;

    case FIXED_POINT_TYPE:
      ret = convert_to_fixed (type, e);
      goto maybe_fold;

    case COMPLEX_TYPE:
      ret = convert_to_complex (type, e);
      goto maybe_fold;

    case VECTOR_TYPE:
      ret = convert_to_vector (type, e);
      goto maybe_fold;

    case RECORD_TYPE:
    case UNION_TYPE:
      if (lang_hooks.types_compatible_p (type, TREE_TYPE (expr)))
        return e;
      break;

    default:
      break;

    maybe_fold:
      if (TREE_CODE (ret) != C_MAYBE_CONST_EXPR)
        ret = fold (ret);
      return ret;
    }

  error ("conversion to non-scalar type requested");
  return error_mark_node;
}
tree
ubsan_instrument_shift (location_t loc, enum tree_code code,
                        tree op0, tree op1)
{
  tree t, tt = NULL_TREE;
  tree type0 = TREE_TYPE (op0);
  tree type1 = TREE_TYPE (op1);
  tree op1_utype = unsigned_type_for (type1);
  HOST_WIDE_INT op0_prec = TYPE_PRECISION (type0);
  tree uprecm1 = build_int_cst (op1_utype, op0_prec - 1);
  tree precm1 = build_int_cst (type1, op0_prec - 1);

  t = fold_convert_loc (loc, op1_utype, op1);
  t = fold_build2 (GT_EXPR, boolean_type_node, t, uprecm1);

  /* For signed x << y, in C99/C11, the following:
     (unsigned) x >> (precm1 - y)
     if non-zero, is undefined.  */
  if (code == LSHIFT_EXPR
      && !TYPE_UNSIGNED (type0)
      && flag_isoc99)
    {
      tree x = fold_build2 (MINUS_EXPR, integer_type_node, precm1, op1);
      tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
      tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
      tt = fold_build2 (NE_EXPR, boolean_type_node, tt,
                        build_int_cst (TREE_TYPE (tt), 0));
    }

  /* For signed x << y, in C++11/C++14, the following:
     x < 0 || ((unsigned) x >> (precm1 - y))
     if > 1, is undefined.  */
  if (code == LSHIFT_EXPR
      && !TYPE_UNSIGNED (TREE_TYPE (op0))
      && (cxx_dialect == cxx11 || cxx_dialect == cxx1y))
    {
      tree x = fold_build2 (MINUS_EXPR, integer_type_node, precm1, op1);
      tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
      tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
      tt = fold_build2 (GT_EXPR, boolean_type_node, tt,
                        build_int_cst (TREE_TYPE (tt), 1));
      x = fold_build2 (LT_EXPR, boolean_type_node, op0,
                       build_int_cst (type0, 0));
      tt = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, x, tt);
    }

  /* In case we have a SAVE_EXPR in a conditional context, we need to
     make sure it gets evaluated before the condition.  */
  t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), op0, t);
  tree data = ubsan_create_data ("__ubsan_shift_data",
                                 loc, ubsan_type_descriptor (type0),
                                 ubsan_type_descriptor (type1), NULL_TREE);
  data = build_fold_addr_expr_loc (loc, data);
  t = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, t,
                   tt ? tt : integer_zero_node);
  tt = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS);
  tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
                            ubsan_encode_value (op1));
  t = fold_build3 (COND_EXPR, void_type_node, t, tt, void_zero_node);

  return t;
}
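At run time the tree built above encodes the C99/C11 rule that signed x << y must not move a 1 bit into or past the sign position. A plain-C model of the two conditions (t: exponent out of range; tt: base overflows), assuming the type's precision equals its bit size:

#include <stdio.h>
#include <limits.h>

static int
shift_ok_c99 (int x, int y)
{
  unsigned prec = sizeof (int) * CHAR_BIT;
  if ((unsigned) y > prec - 1)                      /* t */
    return 0;
  return ((unsigned) x >> (prec - 1 - y)) == 0;     /* !tt */
}

int
main (void)
{
  printf ("%d\n", shift_ok_c99 (1, 30));   /* 1: fits */
  printf ("%d\n", shift_ok_c99 (1, 31));   /* 0: reaches the sign bit */
  return 0;
}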
static void
mf_build_check_statement_for (tree base, tree limit,
                              gimple_stmt_iterator *instr_gsi,
                              location_t location, tree dirflag)
{
  gimple_stmt_iterator gsi;
  basic_block cond_bb, then_bb, join_bb;
  edge e;
  tree cond, t, u, v;
  tree mf_base;
  tree mf_elem;
  tree mf_limit;
  gimple g;
  gimple_seq seq, stmts;

  /* We first need to split the current basic block, and start altering
     the CFG.  This allows us to insert the statements we're about to
     construct into the right basic blocks.  */

  cond_bb = gimple_bb (gsi_stmt (*instr_gsi));
  gsi = *instr_gsi;
  gsi_prev (&gsi);
  if (! gsi_end_p (gsi))
    e = split_block (cond_bb, gsi_stmt (gsi));
  else
    e = split_block_after_labels (cond_bb);
  cond_bb = e->src;
  join_bb = e->dest;

  /* A recap at this point: join_bb is the basic block at whose head is
     the gimple statement for which this check expression is being
     built.  cond_bb is the (possibly new, synthetic) basic block the
     end of which will contain the cache-lookup code, and a conditional
     that jumps to the cache-miss code or, much more likely, over to
     join_bb.  */

  /* Create the bb that contains the cache-miss fallback block (mf_check).  */
  then_bb = create_empty_bb (cond_bb);
  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
  make_single_succ_edge (then_bb, join_bb, EDGE_FALLTHRU);

  /* Mark the pseudo-fallthrough edge from cond_bb to join_bb.  */
  e = find_edge (cond_bb, join_bb);
  e->flags = EDGE_FALSE_VALUE;
  e->count = cond_bb->count;
  e->probability = REG_BR_PROB_BASE;

  /* Update dominance info.  Note that bb_join's data was
     updated by split_block.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
      set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
    }

  /* Update loop info.  */
  if (current_loops)
    add_bb_to_loop (then_bb, cond_bb->loop_father);

  /* Build our local variables.  */
  mf_elem = create_tmp_reg (mf_cache_structptr_type, "__mf_elem");
  mf_base = create_tmp_reg (mf_uintptr_type, "__mf_base");
  mf_limit = create_tmp_reg (mf_uintptr_type, "__mf_limit");

  /* Build: __mf_base = (uintptr_t) <base address expression>.  */
  seq = NULL;
  t = fold_convert_loc (location, mf_uintptr_type, unshare_expr (base));
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_base, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build: __mf_limit = (uintptr_t) <limit address expression>.  */
  t = fold_convert_loc (location, mf_uintptr_type, unshare_expr (limit));
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_limit, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build: __mf_elem = &__mf_lookup_cache [(__mf_base >> __mf_shift)
                                            & __mf_mask].  */
  t = build2 (RSHIFT_EXPR, mf_uintptr_type, mf_base,
              flag_mudflap_threads ? mf_cache_shift_decl
                                   : mf_cache_shift_decl_l);
  t = build2 (BIT_AND_EXPR, mf_uintptr_type, t,
              flag_mudflap_threads ? mf_cache_mask_decl
                                   : mf_cache_mask_decl_l);
  t = build4 (ARRAY_REF,
              TREE_TYPE (TREE_TYPE (mf_cache_array_decl)),
              mf_cache_array_decl, t, NULL_TREE, NULL_TREE);
  t = build1 (ADDR_EXPR, mf_cache_structptr_type, t);
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_assign (mf_elem, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Quick validity check.

     if (__mf_elem->low > __mf_base
         || (__mf_elem->high < __mf_limit))
        {
          __mf_check ();
          ... and only if single-threaded:
          __mf_lookup_shift_l = f...;
          __mf_lookup_mask_l = ...;
        }

     It is expected that this body of code is rarely executed so we mark
     the edge to the THEN clause of the conditional jump as unlikely.  */

  /* Construct t <-- '__mf_elem->low > __mf_base'.  */
  t = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              TYPE_FIELDS (mf_cache_struct_type), NULL_TREE);
  t = build2 (GT_EXPR, boolean_type_node, t, mf_base);

  /* Construct '__mf_elem->high < __mf_limit'.

     First build:
        1) u <-- '__mf_elem->high'
        2) v <-- '__mf_limit'.

     Then build 'u <-- (u < v)'.  */

  u = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              DECL_CHAIN (TYPE_FIELDS (mf_cache_struct_type)), NULL_TREE);

  v = mf_limit;

  u = build2 (LT_EXPR, boolean_type_node, u, v);

  /* Build the composed conditional: t <-- 't || u'.  Then store the
     result of the evaluation of 't' in a temporary variable which we
     can use as the condition for the conditional jump.  */
  t = build2 (TRUTH_OR_EXPR, boolean_type_node, t, u);
  t = force_gimple_operand (t, &stmts, false, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  cond = create_tmp_reg (boolean_type_node, "__mf_unlikely_cond");
  g = gimple_build_assign (cond, t);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* Build the conditional jump.  'cond' is just a temporary so we can
     simply build a void COND_EXPR.  We do need labels in both
     arms though.  */
  g = gimple_build_cond (NE_EXPR, cond, boolean_false_node, NULL_TREE,
                         NULL_TREE);
  gimple_set_location (g, location);
  gimple_seq_add_stmt (&seq, g);

  /* At this point, after so much hard work, we have only constructed
     the conditional jump,

     if (__mf_elem->low > __mf_base
         || (__mf_elem->high < __mf_limit))

     The lowered GIMPLE tree representing this code is in the statement
     list starting at 'seq'.

     We can insert this now in the current basic block, i.e. the one that
     the statement we're instrumenting was originally in.  */
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);

  /*  Now build up the body of the cache-miss handling:

     __mf_check();
     refresh *_l vars.

     This is the body of the conditional.  */

  seq = NULL;
  /* u is a string, so it is already a gimple value.  */
  u = mf_file_function_line_tree (location);
  /* NB: we pass the overall [base..limit] range to mf_check.  */
  v = fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
                       fold_build2_loc (location, MINUS_EXPR, mf_uintptr_type,
                                        mf_limit, mf_base),
                       build_int_cst (mf_uintptr_type, 1));
  v = force_gimple_operand (v, &stmts, true, NULL_TREE);
  gimple_seq_add_seq (&seq, stmts);
  g = gimple_build_call (mf_check_fndecl, 4, mf_base, v, dirflag, u);
  gimple_seq_add_stmt (&seq, g);

  if (! flag_mudflap_threads)
    {
      if (stmt_ends_bb_p (g))
        {
          gsi = gsi_start_bb (then_bb);
          gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
          e = split_block (then_bb, g);
          then_bb = e->dest;
          seq = NULL;
        }

      g = gimple_build_assign (mf_cache_shift_decl_l, mf_cache_shift_decl);
      gimple_seq_add_stmt (&seq, g);

      g = gimple_build_assign (mf_cache_mask_decl_l, mf_cache_mask_decl);
      gimple_seq_add_stmt (&seq, g);
    }

  /* Insert the check code in the THEN block.  */
  gsi = gsi_start_bb (then_bb);
  gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);

  *instr_gsi = gsi_start_bb (join_bb);
}
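The emitted sequence amounts to a hash-indexed range check with a rarely taken slow path. A miniature stand-in for the cache probe and fallback call (names and layout are illustrative, not libmudflap's actual runtime):

#include <stdint.h>
#include <stdio.h>

struct mf_cache { uintptr_t low, high; };

static struct mf_cache cache[256];
static unsigned mf_shift = 3, mf_mask = 255;

static void
check_access (uintptr_t base, uintptr_t limit)
{
  struct mf_cache *elem = &cache[(base >> mf_shift) & mf_mask];
  if (elem->low > base || elem->high < limit)   /* cache miss */
    printf ("slow path: __mf_check (base=%#lx, size=%lu)\n",
            (unsigned long) base, (unsigned long) (limit - base + 1));
}

int
main (void)
{
  int x;
  check_access ((uintptr_t) &x, (uintptr_t) &x + sizeof x - 1);
  return 0;
}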
tree
ubsan_instrument_shift (location_t loc, enum tree_code code,
                        tree op0, tree op1)
{
  tree t, tt = NULL_TREE;
  tree type0 = TREE_TYPE (op0);
  tree type1 = TREE_TYPE (op1);
  if (!INTEGRAL_TYPE_P (type0))
    return NULL_TREE;

  tree op1_utype = unsigned_type_for (type1);
  HOST_WIDE_INT op0_prec = TYPE_PRECISION (type0);
  tree uprecm1 = build_int_cst (op1_utype, op0_prec - 1);

  op0 = unshare_expr (op0);
  op1 = unshare_expr (op1);

  t = fold_convert_loc (loc, op1_utype, op1);
  t = fold_build2 (GT_EXPR, boolean_type_node, t, uprecm1);

  /* If this is not a signed operation, don't perform overflow checks.
     Also punt on bit-fields.  */
  if (TYPE_OVERFLOW_WRAPS (type0)
      || GET_MODE_BITSIZE (TYPE_MODE (type0)) != TYPE_PRECISION (type0)
      || (flag_sanitize & SANITIZE_SHIFT_BASE) == 0)
    ;

  /* For signed x << y, in C99/C11, the following:
     (unsigned) x >> (uprecm1 - y)
     if non-zero, is undefined.  */
  else if (code == LSHIFT_EXPR && flag_isoc99 && cxx_dialect < cxx11)
    {
      tree x = fold_build2 (MINUS_EXPR, op1_utype, uprecm1,
                            fold_convert (op1_utype, unshare_expr (op1)));
      tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
      tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
      tt = fold_build2 (NE_EXPR, boolean_type_node, tt,
                        build_int_cst (TREE_TYPE (tt), 0));
    }

  /* For signed x << y, in C++11 and later, the following:
     x < 0 || ((unsigned) x >> (uprecm1 - y))
     if > 1, is undefined.  */
  else if (code == LSHIFT_EXPR && cxx_dialect >= cxx11)
    {
      tree x = fold_build2 (MINUS_EXPR, op1_utype, uprecm1,
                            fold_convert (op1_utype, unshare_expr (op1)));
      tt = fold_convert_loc (loc, unsigned_type_for (type0),
                             unshare_expr (op0));
      tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
      tt = fold_build2 (GT_EXPR, boolean_type_node, tt,
                        build_int_cst (TREE_TYPE (tt), 1));
      x = fold_build2 (LT_EXPR, boolean_type_node, unshare_expr (op0),
                       build_int_cst (type0, 0));
      tt = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, x, tt);
    }

  /* If the condition was folded to 0, no need to instrument
     this expression.  */
  if (integer_zerop (t) && (tt == NULL_TREE || integer_zerop (tt)))
    return NULL_TREE;

  /* In case we have a SAVE_EXPR in a conditional context, we need to
     make sure it gets evaluated before the condition.  */
  t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), unshare_expr (op0), t);

  enum sanitize_code recover_kind = SANITIZE_SHIFT_EXPONENT;
  tree else_t = void_node;
  if (tt)
    {
      if ((flag_sanitize & SANITIZE_SHIFT_EXPONENT) == 0)
        {
          t = fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, t);
          t = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, t, tt);
          recover_kind = SANITIZE_SHIFT_BASE;
        }
      else
        {
          if (flag_sanitize_undefined_trap_on_error
              || ((!(flag_sanitize_recover & SANITIZE_SHIFT_EXPONENT))
                  == (!(flag_sanitize_recover & SANITIZE_SHIFT_BASE))))
            t = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, t, tt);
          else
            else_t = tt;
        }
    }

  if (flag_sanitize_undefined_trap_on_error)
    tt = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
  else
    {
      tree data = ubsan_create_data ("__ubsan_shift_data", 1, &loc,
                                     ubsan_type_descriptor (type0),
                                     ubsan_type_descriptor (type1), NULL_TREE,
                                     NULL_TREE);
      data = build_fold_addr_expr_loc (loc, data);

      enum built_in_function bcode
        = (flag_sanitize_recover & recover_kind)
          ? BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS
          : BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT;
      tt = builtin_decl_explicit (bcode);
      op0 = unshare_expr (op0);
      op1 = unshare_expr (op1);
      tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
                                ubsan_encode_value (op1));
      if (else_t != void_node)
        {
          bcode = (flag_sanitize_recover & SANITIZE_SHIFT_BASE)
                  ? BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS
                  : BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT;
          tree else_tt = builtin_decl_explicit (bcode);
          op0 = unshare_expr (op0);
          op1 = unshare_expr (op1);
          else_tt = build_call_expr_loc (loc, else_tt, 3, data,
                                         ubsan_encode_value (op0),
                                         ubsan_encode_value (op1));
          else_t = fold_build3 (COND_EXPR, void_type_node, else_t,
                                else_tt, void_node);
        }
    }
  t = fold_build3 (COND_EXPR, void_type_node, t, tt, else_t);

  return t;
}
tree
cilk_for_number_of_iterations (tree cilk_for)
{
  tree t, v, n1, n2, step, type, init, cond, incr, itype;
  enum tree_code cond_code;
  location_t loc = EXPR_LOCATION (cilk_for);

  init = TREE_VEC_ELT (OMP_FOR_INIT (cilk_for), 0);
  v = TREE_OPERAND (init, 0);
  cond = TREE_VEC_ELT (OMP_FOR_COND (cilk_for), 0);
  incr = TREE_VEC_ELT (OMP_FOR_INCR (cilk_for), 0);
  type = TREE_TYPE (v);

  gcc_assert (TREE_CODE (TREE_TYPE (v)) == INTEGER_TYPE
              || TREE_CODE (TREE_TYPE (v)) == POINTER_TYPE);
  n1 = TREE_OPERAND (init, 1);
  cond_code = TREE_CODE (cond);
  n2 = TREE_OPERAND (cond, 1);
  switch (cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
    case NE_EXPR:
      break;
    case LE_EXPR:
      if (POINTER_TYPE_P (TREE_TYPE (n2)))
        n2 = fold_build_pointer_plus_hwi_loc (loc, n2, 1);
      else
        n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (n2), n2,
                              build_int_cst (TREE_TYPE (n2), 1));
      cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      if (POINTER_TYPE_P (TREE_TYPE (n2)))
        n2 = fold_build_pointer_plus_hwi_loc (loc, n2, -1);
      else
        n2 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (n2), n2,
                              build_int_cst (TREE_TYPE (n2), 1));
      cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }

  step = NULL_TREE;
  switch (TREE_CODE (incr))
    {
    case PREINCREMENT_EXPR:
    case POSTINCREMENT_EXPR:
      step = build_int_cst (TREE_TYPE (v), 1);
      break;
    case PREDECREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      step = build_int_cst (TREE_TYPE (v), -1);
      break;
    case MODIFY_EXPR:
      t = TREE_OPERAND (incr, 1);
      gcc_assert (TREE_OPERAND (t, 0) == v);
      switch (TREE_CODE (t))
        {
        case PLUS_EXPR:
          step = TREE_OPERAND (t, 1);
          break;
        case POINTER_PLUS_EXPR:
          step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
          break;
        case MINUS_EXPR:
          step = TREE_OPERAND (t, 1);
          step = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (step), step);
          break;
        default:
          gcc_unreachable ();
        }
      break;
    default:
      gcc_unreachable ();
    }

  itype = type;
  if (POINTER_TYPE_P (itype))
    itype = signed_type_for (itype);

  if (cond_code == NE_EXPR)
    {
      /* For NE_EXPR, we need to find out if the iterator increases
         or decreases from whether step is positive or negative.  */
      tree stype = itype;
      if (TYPE_UNSIGNED (stype))
        stype = signed_type_for (stype);
      cond = fold_build2_loc (loc, GE_EXPR, boolean_type_node,
                              fold_convert_loc (loc, stype, step),
                              build_int_cst (stype, 0));
      t = fold_build3_loc (loc, COND_EXPR, itype, cond,
                           build_int_cst (itype, -1),
                           build_int_cst (itype, 1));
    }
  else
    t = build_int_cst (itype, (cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2_loc (loc, PLUS_EXPR, itype,
                       fold_convert_loc (loc, itype, step), t);
  t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
                       fold_convert_loc (loc, itype, n2));
  t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
                       fold_convert_loc (loc, itype, n1));
  if (TYPE_UNSIGNED (itype) && cond_code == GT_EXPR)
    t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                         fold_build1_loc (loc, NEGATE_EXPR, itype, t),
                         fold_build1_loc (loc, NEGATE_EXPR, itype,
                                          fold_convert_loc (loc, itype,
                                                            step)));
  else if (TYPE_UNSIGNED (itype) && cond_code == NE_EXPR)
    {
      tree t1 = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                                 fold_convert_loc (loc, itype, step));
      tree t2 = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                                 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
                                 fold_build1_loc (loc, NEGATE_EXPR, itype,
                                                  fold_convert_loc (loc, itype,
                                                                    step)));
      t = fold_build3_loc (loc, COND_EXPR, itype, cond, t1, t2);
    }
  else
    t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                         fold_convert_loc (loc, itype, step));
  cond = fold_build2_loc (loc, cond_code, boolean_type_node, n1, n2);
  t = fold_build3_loc (loc, COND_EXPR, itype, cond, t,
                       build_int_cst (itype, 0));
  return t;
}
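For NE_EXPR the -1/+1 adjustment is chosen from the sign of the step at run time, via the COND_EXPR built above. A scalar sketch of the resulting count expression (hypothetical helper):

#include <stdio.h>

static long
ne_trip_count (long n1, long n2, long step)
{
  long adjust = step >= 0 ? -1 : 1;
  return (step + adjust + n2 - n1) / step;
}

int
main (void)
{
  printf ("%ld\n", ne_trip_count (0, 12, 4));    /* 3 */
  printf ("%ld\n", ne_trip_count (12, 0, -4));   /* 3 */
  return 0;
}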
tree
convert (tree type, tree expr)
{
  tree e = expr;
  enum tree_code code = TREE_CODE (type);
  const char *invalid_conv_diag;
  tree ret;
  location_t loc = EXPR_LOCATION (expr);

  if (type == error_mark_node
      || error_operand_p (expr))
    return error_mark_node;

  if ((invalid_conv_diag
       = targetm.invalid_conversion (TREE_TYPE (expr), type)))
    {
      error (invalid_conv_diag);
      return error_mark_node;
    }

  if (type == TREE_TYPE (expr))
    return expr;
  ret = targetm.convert_to_type (type, expr);
  if (ret)
    return ret;

  STRIP_TYPE_NOPS (e);

  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr))
      && (TREE_CODE (TREE_TYPE (expr)) != COMPLEX_TYPE
          || TREE_CODE (e) == COMPLEX_EXPR))
    return fold_convert_loc (loc, type, expr);
  if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK)
    return error_mark_node;
  if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE)
    {
      error ("void value not ignored as it ought to be");
      return error_mark_node;
    }

  switch (code)
    {
    case VOID_TYPE:
      return fold_convert_loc (loc, type, e);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
      if (flag_sanitize & SANITIZE_FLOAT_CAST
          && TREE_CODE (TREE_TYPE (expr)) == REAL_TYPE
          && COMPLETE_TYPE_P (type)
          && current_function_decl != NULL_TREE
          && !lookup_attribute ("no_sanitize_undefined",
                                DECL_ATTRIBUTES (current_function_decl)))
        {
          tree arg;
          if (in_late_binary_op)
            {
              expr = save_expr (expr);
              arg = expr;
            }
          else
            {
              expr = c_save_expr (expr);
              arg = c_fully_fold (expr, false, NULL);
            }
          tree check = ubsan_instrument_float_cast (loc, type, expr, arg);
          expr = fold_build1 (FIX_TRUNC_EXPR, type, expr);
          if (check == NULL)
            return expr;
          return fold_build2 (COMPOUND_EXPR, TREE_TYPE (expr), check, expr);
        }
      ret = convert_to_integer (type, e);
      goto maybe_fold;

    case BOOLEAN_TYPE:
      return fold_convert_loc
        (loc, type,
         c_objc_common_truthvalue_conversion (input_location, expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      ret = convert_to_pointer (type, e);
      goto maybe_fold;

    case REAL_TYPE:
      ret = convert_to_real (type, e);
      goto maybe_fold;

    case FIXED_POINT_TYPE:
      ret = convert_to_fixed (type, e);
      goto maybe_fold;

    case COMPLEX_TYPE:
      /* If converting from COMPLEX_TYPE to a different COMPLEX_TYPE
         and e is not COMPLEX_EXPR, convert_to_complex uses save_expr,
         but for the C FE c_save_expr needs to be called instead.  */
      if (TREE_CODE (TREE_TYPE (e)) == COMPLEX_TYPE)
        {
          if (TREE_CODE (e) != COMPLEX_EXPR)
            {
              tree subtype = TREE_TYPE (type);
              tree elt_type = TREE_TYPE (TREE_TYPE (e));

              if (in_late_binary_op)
                e = save_expr (e);
              else
                e = c_save_expr (e);
              ret
                = fold_build2_loc (loc, COMPLEX_EXPR, type,
                                   convert (subtype,
                                            fold_build1 (REALPART_EXPR,
                                                         elt_type, e)),
                                   convert (subtype,
                                            fold_build1 (IMAGPART_EXPR,
                                                         elt_type, e)));
              goto maybe_fold;
            }
        }
      ret = convert_to_complex (type, e);
      goto maybe_fold;

    case VECTOR_TYPE:
      ret = convert_to_vector (type, e);
      goto maybe_fold;

    case RECORD_TYPE:
    case UNION_TYPE:
      if (lang_hooks.types_compatible_p (type, TREE_TYPE (expr)))
        return e;
      break;

    default:
      break;

    maybe_fold:
      if (TREE_CODE (ret) != C_MAYBE_CONST_EXPR)
        ret = fold (ret);
      return ret;
    }

  error ("conversion to non-scalar type requested");
  return error_mark_node;
}
tree
c_finish_omp_for (location_t locus, tree declv, tree initv, tree condv,
                  tree incrv, tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
        elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
          && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
        {
          error_at (elocus, "invalid type for iteration variable %qE", decl);
          fail = true;
        }

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
         have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
        {
          elocus = DECL_SOURCE_LOCATION (decl);

          init = DECL_INITIAL (decl);
          if (init == NULL)
            {
              error_at (elocus, "%qE is not initialized", decl);
              init = integer_zero_node;
              fail = true;
            }

          init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
                                    /* FIXME diagnostics: This should
                                       be the location of the INIT.  */
                                    elocus,
                                    init,
                                    NULL_TREE);
        }
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      gcc_assert (TREE_OPERAND (init, 0) == decl);

      if (cond == NULL_TREE)
        {
          error_at (elocus, "missing controlling predicate");
          fail = true;
        }
      else
        {
          bool cond_ok = false;

          if (EXPR_HAS_LOCATION (cond))
            elocus = EXPR_LOCATION (cond);

          if (TREE_CODE (cond) == LT_EXPR
              || TREE_CODE (cond) == LE_EXPR
              || TREE_CODE (cond) == GT_EXPR
              || TREE_CODE (cond) == GE_EXPR
              || TREE_CODE (cond) == NE_EXPR
              || TREE_CODE (cond) == EQ_EXPR)
            {
              tree op0 = TREE_OPERAND (cond, 0);
              tree op1 = TREE_OPERAND (cond, 1);

              /* 2.5.1.  The comparison in the condition is computed in
                 the type of DECL, otherwise the behavior is undefined.

                 For example:
                 long n; int i;
                 i < n;

                 according to ISO will be evaluated as:
                 (long)i < n;

                 We want to force:
                 i < (int)n;  */
              if (TREE_CODE (op0) == NOP_EXPR
                  && decl == TREE_OPERAND (op0, 0))
                {
                  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
                  TREE_OPERAND (cond, 1)
                    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
                                       TREE_OPERAND (cond, 1));
                }
              else if (TREE_CODE (op1) == NOP_EXPR
                       && decl == TREE_OPERAND (op1, 0))
                {
                  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
                  TREE_OPERAND (cond, 0)
                    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
                                       TREE_OPERAND (cond, 0));
                }

              if (decl == TREE_OPERAND (cond, 0))
                cond_ok = true;
              else if (decl == TREE_OPERAND (cond, 1))
                {
                  TREE_SET_CODE (cond,
                                 swap_tree_comparison (TREE_CODE (cond)));
                  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
                  TREE_OPERAND (cond, 0) = decl;
                  cond_ok = true;
                }

              if (TREE_CODE (cond) == NE_EXPR
                  || TREE_CODE (cond) == EQ_EXPR)
                {
                  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
                    cond_ok = false;
                  else if (operand_equal_p (TREE_OPERAND (cond, 1),
                                            TYPE_MIN_VALUE (TREE_TYPE (decl)),
                                            0))
                    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
                                         ? GT_EXPR : LE_EXPR);
                  else if (operand_equal_p (TREE_OPERAND (cond, 1),
                                            TYPE_MAX_VALUE (TREE_TYPE (decl)),
                                            0))
                    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
                                         ? LT_EXPR : GE_EXPR);
                  else
                    cond_ok = false;
                }
            }

          if (!cond_ok)
            {
              error_at (elocus, "invalid controlling predicate");
              fail = true;
            }
        }

      if (incr == NULL_TREE)
        {
          error_at (elocus, "missing increment expression");
          fail = true;
        }
      else
        {
          bool incr_ok = false;

          if (EXPR_HAS_LOCATION (incr))
            elocus = EXPR_LOCATION (incr);

          /* Check all the valid increment expressions: v++, v--, ++v, --v,
             v = v + incr, v = incr + v and v = v - incr.  */
          switch (TREE_CODE (incr))
            {
            case POSTINCREMENT_EXPR:
            case PREINCREMENT_EXPR:
            case POSTDECREMENT_EXPR:
            case PREDECREMENT_EXPR:
              if (TREE_OPERAND (incr, 0) != decl)
                break;

              incr_ok = true;
              if (POINTER_TYPE_P (TREE_TYPE (decl))
                  && TREE_OPERAND (incr, 1))
                {
                  tree t = fold_convert_loc (elocus, sizetype,
                                             TREE_OPERAND (incr, 1));

                  if (TREE_CODE (incr) == POSTDECREMENT_EXPR
                      || TREE_CODE (incr) == PREDECREMENT_EXPR)
                    t = fold_build1_loc (elocus, NEGATE_EXPR, sizetype, t);
                  t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (decl), decl, t);
                  incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
                }
              break;

            case MODIFY_EXPR:
              if (TREE_OPERAND (incr, 0) != decl)
                break;
              if (TREE_OPERAND (incr, 1) == decl)
                break;
              if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
                  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
                      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
                incr_ok = true;
              else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
                        || (TREE_CODE (TREE_OPERAND (incr, 1))
                            == POINTER_PLUS_EXPR))
                       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
                incr_ok = true;
              else
                {
                  tree t = check_omp_for_incr_expr (elocus,
                                                    TREE_OPERAND (incr, 1),
                                                    decl);
                  if (t != error_mark_node)
                    {
                      incr_ok = true;
                      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
                      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
                    }
                }
              break;

            default:
              break;
            }
          if (!incr_ok)
            {
              error_at (elocus, "invalid increment expression");
              fail = true;
            }
        }

      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (OMP_FOR);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;

      SET_EXPR_LOCATION (t, locus);
      return add_stmt (t);
    }
}
tree
ubsan_instrument_shift (location_t loc, enum tree_code code,
                        tree op0, tree op1)
{
  tree t, tt = NULL_TREE;
  tree type0 = TREE_TYPE (op0);
  tree type1 = TREE_TYPE (op1);
  tree op1_utype = unsigned_type_for (type1);
  HOST_WIDE_INT op0_prec = TYPE_PRECISION (type0);
  tree uprecm1 = build_int_cst (op1_utype, op0_prec - 1);

  t = fold_convert_loc (loc, op1_utype, op1);
  t = fold_build2 (GT_EXPR, boolean_type_node, t, uprecm1);

  /* For signed x << y, in C99/C11, the following:
     (unsigned) x >> (uprecm1 - y)
     if non-zero, is undefined.  */
  if (code == LSHIFT_EXPR
      && !TYPE_UNSIGNED (type0)
      && flag_isoc99)
    {
      tree x = fold_build2 (MINUS_EXPR, unsigned_type_node, uprecm1,
                            fold_convert (op1_utype, op1));
      tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
      tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
      tt = fold_build2 (NE_EXPR, boolean_type_node, tt,
                        build_int_cst (TREE_TYPE (tt), 0));
    }

  /* For signed x << y, in C++11 and later, the following:
     x < 0 || ((unsigned) x >> (uprecm1 - y))
     if > 1, is undefined.  */
  if (code == LSHIFT_EXPR
      && !TYPE_UNSIGNED (TREE_TYPE (op0))
      && (cxx_dialect >= cxx11))
    {
      tree x = fold_build2 (MINUS_EXPR, unsigned_type_node, uprecm1,
                            fold_convert (op1_utype, op1));
      tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
      tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
      tt = fold_build2 (GT_EXPR, boolean_type_node, tt,
                        build_int_cst (TREE_TYPE (tt), 1));
      x = fold_build2 (LT_EXPR, boolean_type_node, op0,
                       build_int_cst (type0, 0));
      tt = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, x, tt);
    }

  /* If the condition was folded to 0, no need to instrument
     this expression.  */
  if (integer_zerop (t) && (tt == NULL_TREE || integer_zerop (tt)))
    return NULL_TREE;

  /* In case we have a SAVE_EXPR in a conditional context, we need to
     make sure it gets evaluated before the condition.  If the OP0 is
     an instrumented array reference, mark it as having side effects so
     it's not folded away.  */
  if (flag_sanitize & SANITIZE_BOUNDS)
    {
      tree xop0 = op0;
      while (CONVERT_EXPR_P (xop0))
        xop0 = TREE_OPERAND (xop0, 0);
      if (TREE_CODE (xop0) == ARRAY_REF)
        {
          TREE_SIDE_EFFECTS (xop0) = 1;
          TREE_SIDE_EFFECTS (op0) = 1;
        }
    }
  t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), op0, t);
  t = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, t,
                   tt ? tt : integer_zero_node);

  if (flag_sanitize_undefined_trap_on_error)
    tt = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
  else
    {
      tree data = ubsan_create_data ("__ubsan_shift_data", 1, &loc,
                                     ubsan_type_descriptor (type0),
                                     ubsan_type_descriptor (type1), NULL_TREE,
                                     NULL_TREE);
      data = build_fold_addr_expr_loc (loc, data);

      enum built_in_function bcode
        = (flag_sanitize_recover & SANITIZE_SHIFT)
          ? BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS
          : BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT;
      tt = builtin_decl_explicit (bcode);
      tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
                                ubsan_encode_value (op1));
    }
  t = fold_build3 (COND_EXPR, void_type_node, t, tt, void_node);

  return t;
}
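This variant checks the C++11 rule, which additionally rejects negative bases but tolerates shifting a 1 exactly into the sign bit. A plain-C model of the combined condition (assumes the type's precision equals its bit size):

#include <stdio.h>
#include <limits.h>

static int
shift_ok_cxx11 (int x, int y)
{
  unsigned prec = sizeof (int) * CHAR_BIT;
  if ((unsigned) y > prec - 1)
    return 0;
  if (x < 0)
    return 0;
  return ((unsigned) x >> (prec - 1 - y)) <= 1;   /* sign bit reachable */
}

int
main (void)
{
  printf ("%d\n", shift_ok_cxx11 (1, 31));   /* 1: defined in C++11 */
  printf ("%d\n", shift_ok_cxx11 (3, 31));   /* 0: overflows */
  return 0;
}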
static void
lower_builtin_setjmp (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  tree cont_label = create_artificial_label (loc);
  tree next_label = create_artificial_label (loc);
  tree dest, t, arg;
  gimple g;

  /* NEXT_LABEL is the label __builtin_longjmp will jump to.  Its
     address is passed to both __builtin_setjmp_setup and
     __builtin_setjmp_receiver.  */
  FORCED_LABEL (next_label) = 1;

  dest = gimple_call_lhs (stmt);

  /* Build '__builtin_setjmp_setup (BUF, NEXT_LABEL)' and insert.  */
  arg = build_addr (next_label, current_function_decl);
  t = builtin_decl_implicit (BUILT_IN_SETJMP_SETUP);
  g = gimple_build_call (t, 2, gimple_call_arg (stmt, 0), arg);
  gimple_set_location (g, loc);
  gimple_set_block (g, gimple_block (stmt));
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'DEST = 0' and insert.  */
  if (dest)
    {
      g = gimple_build_assign (dest, build_zero_cst (TREE_TYPE (dest)));
      gimple_set_location (g, loc);
      gimple_set_block (g, gimple_block (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  /* Build 'goto CONT_LABEL' and insert.  */
  g = gimple_build_goto (cont_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'NEXT_LABEL:' and insert.  */
  g = gimple_build_label (next_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build '__builtin_setjmp_receiver (NEXT_LABEL)' and insert.  */
  arg = build_addr (next_label, current_function_decl);
  t = builtin_decl_implicit (BUILT_IN_SETJMP_RECEIVER);
  g = gimple_build_call (t, 1, arg);
  gimple_set_location (g, loc);
  gimple_set_block (g, gimple_block (stmt));
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'DEST = 1' and insert.  */
  if (dest)
    {
      g = gimple_build_assign (dest, fold_convert_loc (loc, TREE_TYPE (dest),
                                                       integer_one_node));
      gimple_set_location (g, loc);
      gimple_set_block (g, gimple_block (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  /* Build 'CONT_LABEL:' and insert.  */
  g = gimple_build_label (cont_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Remove the call to __builtin_setjmp.  */
  gsi_remove (gsi, false);
}
static void
lower_builtin_setjmp (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  tree cont_label = create_artificial_label (loc);
  tree next_label = create_artificial_label (loc);
  tree dest, t, arg;
  gimple *g;

  /* __builtin_setjmp_{setup,receiver} aren't ECF_RETURNS_TWICE and for RTL
     these builtins are modelled as non-local label jumps to the label
     that is passed to these two builtins, so pretend we have a non-local
     label during GIMPLE passes too.  See PR60003.  */
  cfun->has_nonlocal_label = 1;

  /* NEXT_LABEL is the label __builtin_longjmp will jump to.  Its
     address is passed to both __builtin_setjmp_setup and
     __builtin_setjmp_receiver.  */
  FORCED_LABEL (next_label) = 1;

  tree orig_dest = dest = gimple_call_lhs (stmt);
  if (orig_dest && TREE_CODE (orig_dest) == SSA_NAME)
    dest = create_tmp_reg (TREE_TYPE (orig_dest));

  /* Build '__builtin_setjmp_setup (BUF, NEXT_LABEL)' and insert.  */
  arg = build_addr (next_label);
  t = builtin_decl_implicit (BUILT_IN_SETJMP_SETUP);
  g = gimple_build_call (t, 2, gimple_call_arg (stmt, 0), arg);
  gimple_set_location (g, loc);
  gimple_set_block (g, gimple_block (stmt));
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'DEST = 0' and insert.  */
  if (dest)
    {
      g = gimple_build_assign (dest, build_zero_cst (TREE_TYPE (dest)));
      gimple_set_location (g, loc);
      gimple_set_block (g, gimple_block (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  /* Build 'goto CONT_LABEL' and insert.  */
  g = gimple_build_goto (cont_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'NEXT_LABEL:' and insert.  */
  g = gimple_build_label (next_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build '__builtin_setjmp_receiver (NEXT_LABEL)' and insert.  */
  arg = build_addr (next_label);
  t = builtin_decl_implicit (BUILT_IN_SETJMP_RECEIVER);
  g = gimple_build_call (t, 1, arg);
  gimple_set_location (g, loc);
  gimple_set_block (g, gimple_block (stmt));
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'DEST = 1' and insert.  */
  if (dest)
    {
      g = gimple_build_assign (dest, fold_convert_loc (loc, TREE_TYPE (dest),
                                                       integer_one_node));
      gimple_set_location (g, loc);
      gimple_set_block (g, gimple_block (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  /* Build 'CONT_LABEL:' and insert.  */
  g = gimple_build_label (cont_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build orig_dest = dest if necessary.  */
  if (dest != orig_dest)
    {
      g = gimple_build_assign (orig_dest, dest);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  /* Remove the call to __builtin_setjmp.  */
  gsi_remove (gsi, false);
}
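The lowering above makes setjmp's two returns explicit: DEST = 0 on the fall-through path and DEST = 1 at the receiver label reached by longjmp. The source-level behavior it implements:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf buf;

static void
jump_back (void)
{
  longjmp (buf, 1);   /* control reappears at the receiver; "returns" 1 */
}

int
main (void)
{
  if (setjmp (buf) == 0)    /* DEST = 0: direct return */
    {
      puts ("first return");
      jump_back ();
    }
  else                      /* DEST = 1: via the receiver label */
    puts ("second return");
  return 0;
}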
static int
forward_propagate_into_cond (gimple_stmt_iterator *gsi_p)
{
  gimple stmt = gsi_stmt (*gsi_p);
  location_t loc = gimple_location (stmt);
  int did_something = 0;

  do
    {
      tree tmp = NULL_TREE;
      tree cond = gimple_assign_rhs1 (stmt);
      tree name, rhs0 = NULL_TREE, rhs1 = NULL_TREE;
      gimple def_stmt;
      bool single_use0_p = false, single_use1_p = false;

      /* We can do tree combining on SSA_NAME and comparison expressions.  */
      if (COMPARISON_CLASS_P (cond)
          && TREE_CODE (TREE_OPERAND (cond, 0)) == SSA_NAME)
        {
          /* For comparisons use the first operand, that is likely to
             simplify comparisons against constants.  */
          name = TREE_OPERAND (cond, 0);
          def_stmt = get_prop_source_stmt (name, false, &single_use0_p);
          if (def_stmt && can_propagate_from (def_stmt))
            {
              tree op1 = TREE_OPERAND (cond, 1);
              rhs0 = rhs_to_tree (TREE_TYPE (op1), def_stmt);
              tmp = combine_cond_expr_cond (loc, TREE_CODE (cond),
                                            boolean_type_node,
                                            rhs0, op1, !single_use0_p);
            }
          /* If that wasn't successful, try the second operand.  */
          if (tmp == NULL_TREE
              && TREE_CODE (TREE_OPERAND (cond, 1)) == SSA_NAME)
            {
              tree op0 = TREE_OPERAND (cond, 0);
              name = TREE_OPERAND (cond, 1);
              def_stmt = get_prop_source_stmt (name, false, &single_use1_p);
              if (!def_stmt || !can_propagate_from (def_stmt))
                return did_something;

              rhs1 = rhs_to_tree (TREE_TYPE (op0), def_stmt);
              tmp = combine_cond_expr_cond (loc, TREE_CODE (cond),
                                            boolean_type_node,
                                            op0, rhs1, !single_use1_p);
            }
          /* If that wasn't successful either, try both operands.  */
          if (tmp == NULL_TREE
              && rhs0 != NULL_TREE
              && rhs1 != NULL_TREE)
            tmp = combine_cond_expr_cond (loc, TREE_CODE (cond),
                                          boolean_type_node,
                                          rhs0,
                                          fold_convert_loc (loc,
                                                            TREE_TYPE (rhs0),
                                                            rhs1),
                                          !(single_use0_p && single_use1_p));
        }
      else if (TREE_CODE (cond) == SSA_NAME)
        {
          name = cond;
          def_stmt = get_prop_source_stmt (name, true, NULL);
          /* Fixed: the original dropped the negation on the first test,
             which would have bailed out exactly when propagation was
             possible.  */
          if (!def_stmt || !can_propagate_from (def_stmt))
            return did_something;

          rhs0 = gimple_assign_rhs1 (def_stmt);
          tmp = combine_cond_expr_cond (loc, NE_EXPR, boolean_type_node, rhs0,
                                        build_int_cst (TREE_TYPE (rhs0), 0),
                                        false);
        }

      if (tmp)
        {
          if (dump_file && tmp)
            {
              fprintf (dump_file, "  Replaced '");
              print_generic_expr (dump_file, cond, 0);
              fprintf (dump_file, "' with '");
              print_generic_expr (dump_file, tmp, 0);
              fprintf (dump_file, "'\n");
            }

          gimple_assign_set_rhs_from_tree (gsi_p, unshare_expr (tmp));
          stmt = gsi_stmt (*gsi_p);
          update_stmt (stmt);

          /* Remove defining statements.  */
          remove_prop_source_from_use (name, NULL);

          if (is_gimple_min_invariant (tmp))
            did_something = 2;
          else if (did_something == 0)
            did_something = 1;

          /* Continue combining.  */
          continue;
        }

      break;
    }
  while (1);

  return did_something;
}
static tree
c_fully_fold_internal (tree expr, bool in_init, bool *maybe_const_operands,
                       bool *maybe_const_itself, bool for_int_const)
{
  tree ret = expr;
  enum tree_code code = TREE_CODE (expr);
  enum tree_code_class kind = TREE_CODE_CLASS (code);
  location_t loc = EXPR_LOCATION (expr);
  tree op0, op1, op2, op3;
  tree orig_op0, orig_op1, orig_op2;
  bool op0_const = true, op1_const = true, op2_const = true;
  bool op0_const_self = true, op1_const_self = true, op2_const_self = true;
  bool nowarning = TREE_NO_WARNING (expr);
  bool unused_p;
  source_range old_range;

  /* Constants, declarations, statements, errors, SAVE_EXPRs and anything
     else not counted as an expression cannot usefully be folded further
     at this point.  */
  if (!IS_EXPR_CODE_CLASS (kind) || kind == tcc_statement
      || code == SAVE_EXPR)
    return expr;

  if (IS_EXPR_CODE_CLASS (kind))
    old_range = EXPR_LOCATION_RANGE (expr);

  /* Operands of variable-length expressions (function calls) have
     already been folded, as have __builtin_* function calls, and such
     expressions cannot occur in constant expressions.  */
  if (kind == tcc_vl_exp)
    {
      *maybe_const_operands = false;
      ret = fold (expr);
      goto out;
    }

  if (code == C_MAYBE_CONST_EXPR)
    {
      tree pre = C_MAYBE_CONST_EXPR_PRE (expr);
      tree inner = C_MAYBE_CONST_EXPR_EXPR (expr);
      if (C_MAYBE_CONST_EXPR_NON_CONST (expr))
        *maybe_const_operands = false;
      if (C_MAYBE_CONST_EXPR_INT_OPERANDS (expr))
        {
          *maybe_const_itself = false;
          inner = c_fully_fold_internal (inner, in_init, maybe_const_operands,
                                         maybe_const_itself, true);
        }
      if (pre && !in_init)
        ret = build2 (COMPOUND_EXPR, TREE_TYPE (expr), pre, inner);
      else
        ret = inner;
      goto out;
    }

  /* Assignment, increment, decrement, function call and comma
     operators, and statement expressions, cannot occur in constant
     expressions if evaluated / outside of sizeof.  (Function calls
     were handled above, though VA_ARG_EXPR is treated like a function
     call here, and statement expressions are handled through
     C_MAYBE_CONST_EXPR to avoid folding inside them.)  */
  switch (code)
    {
    case MODIFY_EXPR:
    case PREDECREMENT_EXPR:
    case PREINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
    case COMPOUND_EXPR:
      *maybe_const_operands = false;
      break;

    case VA_ARG_EXPR:
    case TARGET_EXPR:
    case BIND_EXPR:
    case OBJ_TYPE_REF:
      *maybe_const_operands = false;
      ret = fold (expr);
      goto out;

    default:
      break;
    }

  /* Fold individual tree codes as appropriate.  */
  switch (code)
    {
    case COMPOUND_LITERAL_EXPR:
      /* Any non-constancy will have been marked in a containing
         C_MAYBE_CONST_EXPR; there is no more folding to do here.  */
      goto out;

    case COMPONENT_REF:
      orig_op0 = op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);
      op2 = TREE_OPERAND (expr, 2);
      op0 = c_fully_fold_internal (op0, in_init, maybe_const_operands,
                                   maybe_const_itself, for_int_const);
      STRIP_TYPE_NOPS (op0);
      if (op0 != orig_op0)
        ret = build3 (COMPONENT_REF, TREE_TYPE (expr), op0, op1, op2);
      if (ret != expr)
        {
          TREE_READONLY (ret) = TREE_READONLY (expr);
          TREE_THIS_VOLATILE (ret) = TREE_THIS_VOLATILE (expr);
        }
      goto out;

    case ARRAY_REF:
      orig_op0 = op0 = TREE_OPERAND (expr, 0);
      orig_op1 = op1 = TREE_OPERAND (expr, 1);
      op2 = TREE_OPERAND (expr, 2);
      op3 = TREE_OPERAND (expr, 3);
      op0 = c_fully_fold_internal (op0, in_init, maybe_const_operands,
                                   maybe_const_itself, for_int_const);
      STRIP_TYPE_NOPS (op0);
      op1 = c_fully_fold_internal (op1, in_init, maybe_const_operands,
                                   maybe_const_itself, for_int_const);
      STRIP_TYPE_NOPS (op1);
      op1 = decl_constant_value_for_optimization (op1);
      if (op0 != orig_op0 || op1 != orig_op1)
        ret = build4 (ARRAY_REF, TREE_TYPE (expr), op0, op1, op2, op3);
      if (ret != expr)
        {
          TREE_READONLY (ret) = TREE_READONLY (expr);
          TREE_SIDE_EFFECTS (ret) = TREE_SIDE_EFFECTS (expr);
          TREE_THIS_VOLATILE (ret) = TREE_THIS_VOLATILE (expr);
        }
      ret = fold (ret);
      goto out;

    case COMPOUND_EXPR:
    case MODIFY_EXPR:
    case PREDECREMENT_EXPR:
    case PREINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
    case POINTER_PLUS_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
    case EXACT_DIV_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_AND_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case EQ_EXPR:
    case NE_EXPR:
    case COMPLEX_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_XOR_EXPR:
    case UNORDERED_EXPR:
    case ORDERED_EXPR:
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
      /* Binary operations evaluating both arguments (increment and
         decrement are binary internally in GCC).  */
      orig_op0 = op0 = TREE_OPERAND (expr, 0);
      orig_op1 = op1 = TREE_OPERAND (expr, 1);
      op0 = c_fully_fold_internal (op0, in_init, maybe_const_operands,
                                   maybe_const_itself, for_int_const);
      STRIP_TYPE_NOPS (op0);
      if (code != MODIFY_EXPR
          && code != PREDECREMENT_EXPR
          && code != PREINCREMENT_EXPR
          && code != POSTDECREMENT_EXPR
          && code != POSTINCREMENT_EXPR)
        op0 = decl_constant_value_for_optimization (op0);
      /* The RHS of a MODIFY_EXPR was fully folded when building that
         expression for the sake of conversion warnings.  */
      if (code != MODIFY_EXPR)
        op1 = c_fully_fold_internal (op1, in_init, maybe_const_operands,
                                     maybe_const_itself, for_int_const);
      STRIP_TYPE_NOPS (op1);
      op1 = decl_constant_value_for_optimization (op1);

      if (for_int_const && (TREE_CODE (op0) != INTEGER_CST
                            || TREE_CODE (op1) != INTEGER_CST))
        goto out;

      if (op0 != orig_op0 || op1 != orig_op1 || in_init)
        ret = in_init
              ? fold_build2_initializer_loc (loc, code, TREE_TYPE (expr),
                                             op0, op1)
              : fold_build2_loc (loc, code, TREE_TYPE (expr), op0, op1);
      else
        ret = fold (expr);
      if (TREE_OVERFLOW_P (ret)
          && !TREE_OVERFLOW_P (op0)
          && !TREE_OVERFLOW_P (op1))
        overflow_warning (EXPR_LOC_OR_LOC (expr, input_location), ret);
      if (code == LSHIFT_EXPR
          && TREE_CODE (orig_op0) != INTEGER_CST
          && TREE_CODE (TREE_TYPE (orig_op0)) == INTEGER_TYPE
          && TREE_CODE (op0) == INTEGER_CST
          && c_inhibit_evaluation_warnings == 0
          && tree_int_cst_sgn (op0) < 0)
        warning_at (loc, OPT_Wshift_negative_value,
                    "left shift of negative value");
      if ((code == LSHIFT_EXPR || code == RSHIFT_EXPR)
          && TREE_CODE (orig_op1) != INTEGER_CST
          && TREE_CODE (op1) == INTEGER_CST
          && (TREE_CODE (TREE_TYPE (orig_op0)) == INTEGER_TYPE
              || TREE_CODE (TREE_TYPE (orig_op0)) == FIXED_POINT_TYPE)
          && TREE_CODE (TREE_TYPE (orig_op1)) == INTEGER_TYPE
          && c_inhibit_evaluation_warnings == 0)
        {
          if (tree_int_cst_sgn (op1) < 0)
            warning_at (loc, OPT_Wshift_count_negative,
                        (code == LSHIFT_EXPR
                         ? G_("left shift count is negative")
                         : G_("right shift count is negative")));
          else if (compare_tree_int (op1,
                                     TYPE_PRECISION (TREE_TYPE (orig_op0)))
                   >= 0)
            warning_at (loc, OPT_Wshift_count_overflow,
                        (code == LSHIFT_EXPR
                         ? G_("left shift count >= width of type")
                         : G_("right shift count >= width of type")));
        }
      if (code == LSHIFT_EXPR
          /* If either OP0 has been folded to INTEGER_CST...  */
          && ((TREE_CODE (orig_op0) != INTEGER_CST
               && TREE_CODE (TREE_TYPE (orig_op0)) == INTEGER_TYPE
               && TREE_CODE (op0) == INTEGER_CST)
              /* ...or if OP1 has been folded to INTEGER_CST...  */
              || (TREE_CODE (orig_op1) != INTEGER_CST
                  && TREE_CODE (TREE_TYPE (orig_op1)) == INTEGER_TYPE
                  && TREE_CODE (op1) == INTEGER_CST))
          && c_inhibit_evaluation_warnings == 0)
        /* ...then maybe we can detect an overflow.  */
        maybe_warn_shift_overflow (loc, op0, op1);
      if ((code == TRUNC_DIV_EXPR
           || code == CEIL_DIV_EXPR
           || code == FLOOR_DIV_EXPR
           || code == EXACT_DIV_EXPR
           || code == TRUNC_MOD_EXPR)
          && TREE_CODE (orig_op1) != INTEGER_CST
          && TREE_CODE (op1) == INTEGER_CST
          && (TREE_CODE (TREE_TYPE (orig_op0)) == INTEGER_TYPE
              || TREE_CODE (TREE_TYPE (orig_op0)) == FIXED_POINT_TYPE)
          && TREE_CODE (TREE_TYPE (orig_op1)) == INTEGER_TYPE)
        warn_for_div_by_zero (loc, op1);
      goto out;

    case INDIRECT_REF:
    case FIX_TRUNC_EXPR:
    case FLOAT_EXPR:
    CASE_CONVERT:
    case ADDR_SPACE_CONVERT_EXPR:
    case VIEW_CONVERT_EXPR:
    case NON_LVALUE_EXPR:
    case NEGATE_EXPR:
    case BIT_NOT_EXPR:
    case TRUTH_NOT_EXPR:
    case ADDR_EXPR:
    case CONJ_EXPR:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
      /* Unary operations.  */
      orig_op0 = op0 = TREE_OPERAND (expr, 0);
      op0 = c_fully_fold_internal (op0, in_init, maybe_const_operands,
                                   maybe_const_itself, for_int_const);
      STRIP_TYPE_NOPS (op0);
      if (code != ADDR_EXPR && code != REALPART_EXPR && code != IMAGPART_EXPR)
        op0 = decl_constant_value_for_optimization (op0);

      if (for_int_const && TREE_CODE (op0) != INTEGER_CST)
        goto out;

      /* ??? Cope with user tricks that amount to offsetof.  The
         middle-end is not prepared to deal with them if they occur in
         initializers.  */
      if (op0 != orig_op0
          && code == ADDR_EXPR
          && (op1 = get_base_address (op0)) != NULL_TREE
          && INDIRECT_REF_P (op1)
          && TREE_CONSTANT (TREE_OPERAND (op1, 0)))
        ret = fold_convert_loc (loc, TREE_TYPE (expr), fold_offsetof_1 (op0));
      else if (op0 != orig_op0 || in_init)
        ret = in_init ?
fold_build1_initializer_loc (loc, code, TREE_TYPE (expr), op0) : fold_build1_loc (loc, code, TREE_TYPE (expr), op0); else ret = fold (expr); if (code == INDIRECT_REF && ret != expr && INDIRECT_REF_P (ret)) { TREE_READONLY (ret) = TREE_READONLY (expr); TREE_SIDE_EFFECTS (ret) = TREE_SIDE_EFFECTS (expr); TREE_THIS_VOLATILE (ret) = TREE_THIS_VOLATILE (expr); } switch (code) { case FIX_TRUNC_EXPR: case FLOAT_EXPR: CASE_CONVERT: /* Don't warn about explicit conversions. We will already have warned about suspect implicit conversions. */ break; default: if (TREE_OVERFLOW_P (ret) && !TREE_OVERFLOW_P (op0)) overflow_warning (EXPR_LOCATION (expr), ret); break; } goto out; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: /* Binary operations not necessarily evaluating both arguments. */ orig_op0 = op0 = TREE_OPERAND (expr, 0); orig_op1 = op1 = TREE_OPERAND (expr, 1); op0 = c_fully_fold_internal (op0, in_init, &op0_const, &op0_const_self, for_int_const); STRIP_TYPE_NOPS (op0); unused_p = (op0 == (code == TRUTH_ANDIF_EXPR ? truthvalue_false_node : truthvalue_true_node)); c_disable_warnings (unused_p); op1 = c_fully_fold_internal (op1, in_init, &op1_const, &op1_const_self, for_int_const); STRIP_TYPE_NOPS (op1); c_enable_warnings (unused_p); if (for_int_const && (TREE_CODE (op0) != INTEGER_CST /* Require OP1 be an INTEGER_CST only if it's evaluated. */ || (!unused_p && TREE_CODE (op1) != INTEGER_CST))) goto out; if (op0 != orig_op0 || op1 != orig_op1 || in_init) ret = in_init ? fold_build2_initializer_loc (loc, code, TREE_TYPE (expr), op0, op1) : fold_build2_loc (loc, code, TREE_TYPE (expr), op0, op1); else ret = fold (expr); *maybe_const_operands &= op0_const; *maybe_const_itself &= op0_const_self; if (!(flag_isoc99 && op0_const && op0_const_self && (code == TRUTH_ANDIF_EXPR ? op0 == truthvalue_false_node : op0 == truthvalue_true_node))) *maybe_const_operands &= op1_const; if (!(op0_const && op0_const_self && (code == TRUTH_ANDIF_EXPR ? op0 == truthvalue_false_node : op0 == truthvalue_true_node))) *maybe_const_itself &= op1_const_self; goto out; case COND_EXPR: orig_op0 = op0 = TREE_OPERAND (expr, 0); orig_op1 = op1 = TREE_OPERAND (expr, 1); orig_op2 = op2 = TREE_OPERAND (expr, 2); op0 = c_fully_fold_internal (op0, in_init, &op0_const, &op0_const_self, for_int_const); STRIP_TYPE_NOPS (op0); c_disable_warnings (op0 == truthvalue_false_node); op1 = c_fully_fold_internal (op1, in_init, &op1_const, &op1_const_self, for_int_const); STRIP_TYPE_NOPS (op1); c_enable_warnings (op0 == truthvalue_false_node); c_disable_warnings (op0 == truthvalue_true_node); op2 = c_fully_fold_internal (op2, in_init, &op2_const, &op2_const_self, for_int_const); STRIP_TYPE_NOPS (op2); c_enable_warnings (op0 == truthvalue_true_node); if (for_int_const && (TREE_CODE (op0) != INTEGER_CST /* Only the evaluated operand must be an INTEGER_CST. */ || (op0 == truthvalue_true_node ? 
TREE_CODE (op1) != INTEGER_CST : TREE_CODE (op2) != INTEGER_CST))) goto out; if (op0 != orig_op0 || op1 != orig_op1 || op2 != orig_op2) ret = fold_build3_loc (loc, code, TREE_TYPE (expr), op0, op1, op2); else ret = fold (expr); *maybe_const_operands &= op0_const; *maybe_const_itself &= op0_const_self; if (!(flag_isoc99 && op0_const && op0_const_self && op0 == truthvalue_false_node)) *maybe_const_operands &= op1_const; if (!(op0_const && op0_const_self && op0 == truthvalue_false_node)) *maybe_const_itself &= op1_const_self; if (!(flag_isoc99 && op0_const && op0_const_self && op0 == truthvalue_true_node)) *maybe_const_operands &= op2_const; if (!(op0_const && op0_const_self && op0 == truthvalue_true_node)) *maybe_const_itself &= op2_const_self; goto out; case VEC_COND_EXPR: orig_op0 = op0 = TREE_OPERAND (expr, 0); orig_op1 = op1 = TREE_OPERAND (expr, 1); orig_op2 = op2 = TREE_OPERAND (expr, 2); op0 = c_fully_fold_internal (op0, in_init, maybe_const_operands, maybe_const_itself, for_int_const); STRIP_TYPE_NOPS (op0); op1 = c_fully_fold_internal (op1, in_init, maybe_const_operands, maybe_const_itself, for_int_const); STRIP_TYPE_NOPS (op1); op2 = c_fully_fold_internal (op2, in_init, maybe_const_operands, maybe_const_itself, for_int_const); STRIP_TYPE_NOPS (op2); if (op0 != orig_op0 || op1 != orig_op1 || op2 != orig_op2) ret = fold_build3_loc (loc, code, TREE_TYPE (expr), op0, op1, op2); else ret = fold (expr); goto out; case EXCESS_PRECISION_EXPR: /* Each case where an operand with excess precision may be encountered must remove the EXCESS_PRECISION_EXPR around inner operands and possibly put one around the whole expression or possibly convert to the semantic type (which c_fully_fold does); we cannot tell at this stage which is appropriate in any particular case. */ gcc_unreachable (); default: /* Various codes may appear through folding built-in functions and their arguments. */ goto out; } out: /* Some folding may introduce NON_LVALUE_EXPRs; all lvalue checks have been done by this point, so remove them again. */ nowarning |= TREE_NO_WARNING (ret); STRIP_TYPE_NOPS (ret); if (nowarning && !TREE_NO_WARNING (ret)) { if (!CAN_HAVE_LOCATION_P (ret)) ret = build1 (NOP_EXPR, TREE_TYPE (ret), ret); TREE_NO_WARNING (ret) = 1; } if (ret != expr) { protected_set_expr_location (ret, loc); if (IS_EXPR_CODE_CLASS (kind)) set_source_range (ret, old_range.m_start, old_range.m_finish); } return ret; }
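/* Illustration (user-level C, not part of the folder): the shift
   diagnostics above fire only when an operand was not an INTEGER_CST in
   the original expression but folds to one, so a const-qualified variable
   used as a shift count is the typical trigger.  With 32-bit int and
   optimization enabled (decl_constant_value_for_optimization only folds
   decls when optimizing), N folds to 40 and c_fully_fold_internal warns
   "left shift count >= width of type"; a literal 40 would have been
   diagnosed earlier, when the expression was built.  */

int
shift_example (int x)
{
  const int n = 40;
  return x << n;
}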
tree convert (tree type, tree expr) { tree e = expr; enum tree_code code = TREE_CODE (type); const char *invalid_conv_diag; tree ret; location_t loc = EXPR_LOCATION (expr); if (type == error_mark_node || expr == error_mark_node || TREE_TYPE (expr) == error_mark_node) return error_mark_node; if ((invalid_conv_diag = targetm.invalid_conversion (TREE_TYPE (expr), type))) { error (invalid_conv_diag); return error_mark_node; } if (type == TREE_TYPE (expr)) return expr; ret = targetm.convert_to_type (type, expr); if (ret) return ret; STRIP_TYPE_NOPS (e); if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr)) && (TREE_CODE (TREE_TYPE (expr)) != COMPLEX_TYPE || TREE_CODE (e) == COMPLEX_EXPR)) return fold_convert_loc (loc, type, expr); if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK) return error_mark_node; if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE) { error ("void value not ignored as it ought to be"); return error_mark_node; } switch (code) { case VOID_TYPE: return fold_convert_loc (loc, type, e); case INTEGER_TYPE: case ENUMERAL_TYPE: ret = convert_to_integer (type, e); goto maybe_fold; case BOOLEAN_TYPE: return fold_convert_loc (loc, type, c_objc_common_truthvalue_conversion (input_location, expr)); case POINTER_TYPE: case REFERENCE_TYPE: ret = convert_to_pointer (type, e); goto maybe_fold; case REAL_TYPE: ret = convert_to_real (type, e); goto maybe_fold; case FIXED_POINT_TYPE: ret = convert_to_fixed (type, e); goto maybe_fold; case COMPLEX_TYPE: /* If converting from COMPLEX_TYPE to a different COMPLEX_TYPE and e is not COMPLEX_EXPR, convert_to_complex uses save_expr, but for the C FE c_save_expr needs to be called instead. */ if (TREE_CODE (TREE_TYPE (e)) == COMPLEX_TYPE) { if (TREE_CODE (e) != COMPLEX_EXPR) { tree subtype = TREE_TYPE (type); tree elt_type = TREE_TYPE (TREE_TYPE (e)); if (in_late_binary_op) e = save_expr (e); else e = c_save_expr (e); ret = fold_build2_loc (loc, COMPLEX_EXPR, type, convert (subtype, fold_build1 (REALPART_EXPR, elt_type, e)), convert (subtype, fold_build1 (IMAGPART_EXPR, elt_type, e))); goto maybe_fold; } } ret = convert_to_complex (type, e); goto maybe_fold; case VECTOR_TYPE: ret = convert_to_vector (type, e); goto maybe_fold; case RECORD_TYPE: case UNION_TYPE: if (lang_hooks.types_compatible_p (type, TREE_TYPE (expr))) return e; break; default: break; maybe_fold: if (TREE_CODE (ret) != C_MAYBE_CONST_EXPR) ret = fold (ret); return ret; } error ("conversion to non-scalar type requested"); return error_mark_node; }
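/* Illustration (not part of the front end): the COMPLEX_TYPE path above.
   Narrowing a complex value that is not already a COMPLEX_EXPR goes
   through c_save_expr so the operand is evaluated only once, and the
   result is rebuilt roughly as
       COMPLEX_EXPR <(float) __real z, (float) __imag z>.  */

_Complex float
complex_narrow_example (_Complex double z)
{
  return z;
}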
/* Walk a COMPONENT_REF/ARRAY_REF node hierarchy up to its outermost base
   object.  The walk mirrors the one in mudflap's mf_xform_derefs_1, from
   which it is borrowed.  */
static tree
mf_walk_comp_ref (tree *tp, tree type, location_t location,
		  tree *addr_store, tree *base_store)
{
  tree var, t, addr, base, size;

  t = *tp;

  int component_ref_only = (TREE_CODE (t) == COMPONENT_REF);
  /* If we have a bitfield component reference, we must note the
     innermost addressable object in ELT, from which we will construct
     the byte-addressable bounds of the bitfield.  */
  tree elt = NULL_TREE;
  int bitfield_ref_p = (TREE_CODE (t) == COMPONENT_REF
			&& DECL_BIT_FIELD_TYPE (TREE_OPERAND (t, 1)));

  /* Iterate to the top of the ARRAY_REF/COMPONENT_REF containment
     hierarchy to find the outermost VAR_DECL.  */
  var = TREE_OPERAND (t, 0);
  while (1)
    {
      if (bitfield_ref_p && elt == NULL_TREE
	  && (TREE_CODE (var) == ARRAY_REF
	      || TREE_CODE (var) == COMPONENT_REF))
	elt = var;

      if (TREE_CODE (var) == ARRAY_REF)
	{
	  component_ref_only = 0;
	  var = TREE_OPERAND (var, 0);
	}
      else if (TREE_CODE (var) == COMPONENT_REF)
	var = TREE_OPERAND (var, 0);
      else if (INDIRECT_REF_P (var)
	       || TREE_CODE (var) == MEM_REF)
	{
	  base = TREE_OPERAND (var, 0);
	  break;
	}
      else if (TREE_CODE (var) == VIEW_CONVERT_EXPR)
	{
	  var = TREE_OPERAND (var, 0);
	  if (CONSTANT_CLASS_P (var)
	      && TREE_CODE (var) != STRING_CST)
	    return NULL_TREE;
	}
      else
	{
	  DEBUGLOG ("TREE_CODE(var) : %s comp_ref_only = %d eligible = %d\n",
		    tree_code_name[(int) TREE_CODE (var)], component_ref_only,
		    mf_decl_eligible_p (var));
	  gcc_assert (TREE_CODE (var) == VAR_DECL
		      || TREE_CODE (var) == SSA_NAME /* TODO: check this.  */
		      || TREE_CODE (var) == PARM_DECL
		      || TREE_CODE (var) == RESULT_DECL
		      || TREE_CODE (var) == STRING_CST);

	  /* Don't instrument this access if the underlying
	     variable is not "eligible".  This test matches
	     those arrays that have only known-valid indexes,
	     and thus are not labeled TREE_ADDRESSABLE.  */
	  /* TODO: should "|| component_ref_only" also bail out here, as it
	     does in mf_xform_derefs_1?  */
	  if (! mf_decl_eligible_p (var))
	    return NULL_TREE;
	  else
	    {
	      base = build1 (ADDR_EXPR,
			     build_pointer_type (TREE_TYPE (var)), var);
	      break;
	    }
	}
    }

  /* Handle the case of ordinary non-indirection structure accesses.  These
     have only nested COMPONENT_REF nodes (no INDIRECT_REF), but pass through
     the above filter loop.  Note that it's possible for such a struct
     variable to match the eligible_p test because someone else might take
     its address sometime.  */

  /* We need special processing for bitfield components, because their
     addresses cannot be taken.  */
  if (bitfield_ref_p)
    {
      tree field = TREE_OPERAND (t, 1);

      if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST)
	size = DECL_SIZE_UNIT (field);

      if (elt)
	elt = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (elt)), elt);
      addr = fold_convert_loc (location, ptr_type_node, elt ? elt : base);
      addr = fold_build2_loc (location, POINTER_PLUS_EXPR, ptr_type_node,
			      addr,
			      fold_convert_loc (location, sizetype,
						byte_position (field)));
    }
  else
    addr = build1 (ADDR_EXPR, build_pointer_type (type), t);

  if (addr_store)
    *addr_store = addr;
  if (base_store)
    *base_store = base;
  return var;
}
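/* Illustration (hypothetical user code): what the walk reports for a
   nested access through a pointer.  The loop peels the ARRAY_REF and
   COMPONENT_REF nodes until it reaches the indirection, so for the access
   below it returns the accessed reference with roughly
   *addr_store = &p->a.b[i] and *base_store = p.  */

struct lbc_inner { int b[4]; };
struct lbc_outer { struct lbc_inner a; };

int
walk_example (struct lbc_outer *p, int i)
{
  return p->a.b[i];
}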
static void mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp, location_t location, tree dirflag) { tree type, base, limit, addr, size, t; /* Don't instrument read operations. */ if (dirflag == integer_zero_node && flag_mudflap_ignore_reads) return; /* Don't instrument marked nodes. */ if (mf_marked_p (*tp)) return; t = *tp; type = TREE_TYPE (t); if (type == error_mark_node) return; size = TYPE_SIZE_UNIT (type); switch (TREE_CODE (t)) { case ARRAY_REF: case COMPONENT_REF: { /* This is trickier than it may first appear. The reason is that we are looking at expressions from the "inside out" at this point. We may have a complex nested aggregate/array expression (e.g. "a.b[i].c"), maybe with an indirection as the leftmost operator ("p->a.b.d"), where instrumentation is necessary. Or we may have an innocent "a.b.c" expression that must not be instrumented. We need to recurse all the way down the nesting structure to figure it out: looking just at the outer node is not enough. */ tree var; int component_ref_only = (TREE_CODE (t) == COMPONENT_REF); /* If we have a bitfield component reference, we must note the innermost addressable object in ELT, from which we will construct the byte-addressable bounds of the bitfield. */ tree elt = NULL_TREE; int bitfield_ref_p = (TREE_CODE (t) == COMPONENT_REF && DECL_BIT_FIELD_TYPE (TREE_OPERAND (t, 1))); /* Iterate to the top of the ARRAY_REF/COMPONENT_REF containment hierarchy to find the outermost VAR_DECL. */ var = TREE_OPERAND (t, 0); while (1) { if (bitfield_ref_p && elt == NULL_TREE && (TREE_CODE (var) == ARRAY_REF || TREE_CODE (var) == COMPONENT_REF)) elt = var; if (TREE_CODE (var) == ARRAY_REF) { component_ref_only = 0; var = TREE_OPERAND (var, 0); } else if (TREE_CODE (var) == COMPONENT_REF) var = TREE_OPERAND (var, 0); else if (INDIRECT_REF_P (var) || TREE_CODE (var) == MEM_REF) { base = TREE_OPERAND (var, 0); break; } else if (TREE_CODE (var) == VIEW_CONVERT_EXPR) { var = TREE_OPERAND (var, 0); if (CONSTANT_CLASS_P (var) && TREE_CODE (var) != STRING_CST) return; } else { gcc_assert (TREE_CODE (var) == VAR_DECL || TREE_CODE (var) == PARM_DECL || TREE_CODE (var) == RESULT_DECL || TREE_CODE (var) == STRING_CST); /* Don't instrument this access if the underlying variable is not "eligible". This test matches those arrays that have only known-valid indexes, and thus are not labeled TREE_ADDRESSABLE. */ if (! mf_decl_eligible_p (var) || component_ref_only) return; else { base = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (var)), var); break; } } } /* Handle the case of ordinary non-indirection structure accesses. These have only nested COMPONENT_REF nodes (no INDIRECT_REF), but pass through the above filter loop. Note that it's possible for such a struct variable to match the eligible_p test because someone else might take its address sometime. */ /* We need special processing for bitfield components, because their addresses cannot be taken. */ if (bitfield_ref_p) { tree field = TREE_OPERAND (t, 1); if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST) size = DECL_SIZE_UNIT (field); if (elt) elt = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (elt)), elt); addr = fold_convert_loc (location, ptr_type_node, elt ? 
elt : base); addr = fold_build_pointer_plus_loc (location, addr, byte_position (field)); } else addr = build1 (ADDR_EXPR, build_pointer_type (type), t); limit = fold_build2_loc (location, MINUS_EXPR, mf_uintptr_type, fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type, fold_convert (mf_uintptr_type, addr), size), integer_one_node); } break; case INDIRECT_REF: addr = TREE_OPERAND (t, 0); base = addr; limit = fold_build_pointer_plus_hwi_loc (location, fold_build_pointer_plus_loc (location, base, size), -1); break; case MEM_REF: if (addr_expr_of_non_mem_decl_p (TREE_OPERAND (t, 0))) return; addr = fold_build_pointer_plus_loc (location, TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)); base = addr; limit = fold_build_pointer_plus_hwi_loc (location, fold_build_pointer_plus_loc (location, base, size), -1); break; case TARGET_MEM_REF: if (addr_expr_of_non_mem_decl_p (TMR_BASE (t))) return; addr = tree_mem_ref_addr (ptr_type_node, t); base = addr; limit = fold_build_pointer_plus_hwi_loc (location, fold_build_pointer_plus_loc (location, base, size), -1); break; case ARRAY_RANGE_REF: warning (OPT_Wmudflap, "mudflap checking not yet implemented for ARRAY_RANGE_REF"); return; case BIT_FIELD_REF: /* ??? merge with COMPONENT_REF code above? */ { tree ofs, rem, bpu; /* If we're not dereferencing something, then the access must be ok. */ if (TREE_CODE (TREE_OPERAND (t, 0)) != INDIRECT_REF) return; bpu = bitsize_int (BITS_PER_UNIT); ofs = fold_convert (bitsizetype, TREE_OPERAND (t, 2)); rem = size_binop_loc (location, TRUNC_MOD_EXPR, ofs, bpu); ofs = size_binop_loc (location, TRUNC_DIV_EXPR, ofs, bpu); size = fold_convert (bitsizetype, TREE_OPERAND (t, 1)); size = size_binop_loc (location, PLUS_EXPR, size, rem); size = size_binop_loc (location, CEIL_DIV_EXPR, size, bpu); size = fold_convert (sizetype, size); addr = TREE_OPERAND (TREE_OPERAND (t, 0), 0); addr = fold_convert (ptr_type_node, addr); addr = fold_build_pointer_plus_loc (location, addr, ofs); base = addr; limit = fold_build_pointer_plus_hwi_loc (location, fold_build_pointer_plus_loc (location, base, size), -1); } break; default: return; } mf_build_check_statement_for (base, limit, iter, location, dirflag); }
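/* Illustration (not part of the pass): the byte range checked for a plain
   pointer dereference.  The INDIRECT_REF case above computes base = p and
   limit = p + sizeof (int) - 1, the first and last byte touched by the
   load, and mf_build_check_statement_for then emits the range check
   before the access.  */

int
mudflap_deref_example (int *p)
{
  return *p;
}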
static void
mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp,
		   location_t location, tree dirflag)
{
  tree type, size, t, temp;
  tree addr = NULL_TREE;
  bool check_red_flag = false, instrumented = false;
  tree fncall_param_val;
  gimple is_char_red_call;

  /* TODO: fix this to use our flag.  */
  /* Don't instrument read operations.  */
  if (dirflag == integer_zero_node && flag_mudflap_ignore_reads)
    return;

  DEBUGLOG ("TREE_CODE(t) = %s, mf_decl_eligible_p : %d\n",
	    tree_code_name[(int) TREE_CODE (*tp)], mf_decl_eligible_p (*tp));

  t = *tp;
  type = TREE_TYPE (t);
  if (type == error_mark_node)
    return;
  size = TYPE_SIZE_UNIT (type);

  /* Don't instrument marked nodes.  */
  if (mf_marked_p (t) && !mf_decl_eligible_p (t))
    {
      DEBUGLOG ("Returning here - 1\n");
      return;
    }

  if (TREE_CODE (t) == ADDR_EXPR
      || TREE_CODE (t) == COMPONENT_REF
      || TREE_CODE (t) == ARRAY_REF
      || (TREE_CODE (t) == VAR_DECL && mf_decl_eligible_p (t)))
    {
      DEBUGLOG ("------ INSTRUMENTING NODES ---------\n");
      temp = TREE_OPERAND (t, 0);
      /* TODO: check this out -- what should happen in this case?  */
      if (temp
	  && (TREE_CODE (temp) == STRING_CST
	      || TREE_CODE (temp) == FUNCTION_DECL))
	return;
      DEBUGLOG ("TREE_CODE(temp) : %s\n",
		tree_code_name[(int) TREE_CODE (temp)]);
      if (TREE_CODE (t) == VAR_DECL)
	*tp = mf_walk_n_instrument (tp, &instrumented);
      else
	TREE_OPERAND (t, 0)
	  = mf_walk_n_instrument (&(TREE_OPERAND (t, 0)), &instrumented);
      if (TREE_CODE (t) == ADDR_EXPR)
	return;
    }

  DEBUGLOG ("Pass2 derefs: entering deref section\n");
  t = *tp;
  switch (TREE_CODE (t))
    {
    case ARRAY_REF:
    case COMPONENT_REF: /* TODO: check that the following also works for
			   component refs.  */
      {
	HOST_WIDE_INT bitsize, bitpos;
	tree inner, offset;
	int volatilep, unsignedp;
	enum machine_mode mode1;
	DEBUGLOG ("------ INSIDE CASE COMPONENT_REF ---------\n");
	check_red_flag = true;
	inner = get_inner_reference (t, &bitsize, &bitpos, &offset,
				     &mode1, &unsignedp, &volatilep, false);
	if (!offset)
	  offset = size_zero_node;
	offset = size_binop (PLUS_EXPR, offset,
			     size_int (bitpos / BITS_PER_UNIT));
	addr = fold_build2_loc (location, POINTER_PLUS_EXPR, ptr_type_node,
				build1 (ADDR_EXPR,
					build_pointer_type (type), inner),
				offset);
	break; /* TODO: continue?  */
      }

    case INDIRECT_REF:
      DEBUGLOG ("------ INSIDE CASE INDIRECT_REF ---------\n");
      check_red_flag = true;
      addr = TREE_OPERAND (t, 0);
      break; /* TODO: continue?  */

    case MEM_REF:
      DEBUGLOG ("------ INSIDE CASE MEM_REF ---------\n");
      check_red_flag = true;
      addr = fold_build2_loc (location, POINTER_PLUS_EXPR,
			      TREE_TYPE (TREE_OPERAND (t, 0)),
			      TREE_OPERAND (t, 0),
			      fold_convert (sizetype, TREE_OPERAND (t, 1)));
      break;

    case TARGET_MEM_REF:
      /* TODO: decide whether this case is wanted; find out what it does.  */
      DEBUGLOG ("------ INSIDE CASE TARGET_MEM_REF ---------\n");
      check_red_flag = true;
      addr = tree_mem_ref_addr (ptr_type_node, t);
      break;

    case ARRAY_RANGE_REF:
      DEBUGLOG ("------ INSIDE CASE ARRAY_RANGE_REF ---------\n");
      DEBUGLOG ("------ TODO: not handled yet ---------\n");
      return;

    case BIT_FIELD_REF:
      DEBUGLOG ("------ INSIDE CASE BIT_FIELD_REF ---------\n");
      DEBUGLOG ("------ TODO: not handled yet ---------\n");
      return;

    default:
      DEBUGLOG ("------ INSIDE CASE DEFAULT ---------\n");
      if (mf_decl_eligible_p (t))
	{
	  DEBUGLOG ("Do you want to be here?\n");
	  return;
	  /* if ((*tp = mx_xform_instrument_pass2 (t)) == NULL_TREE)
	       {
		 DEBUGLOG ("Failed to set tree operand\n");
		 return;
	       }  */
	}
      break;
    }

  /* Add the call to is_char_red.  */
  if (check_red_flag)
    {
      DEBUGLOG ("Entering is_char_red\n");
      fncall_param_val
	= fold_build2_loc (location, MEM_REF, ptr_type_node, addr,
			   build_int_cst (build_pointer_type (type), 0));
      fncall_param_val = fold_convert_loc (location, unsigned_type_node,
					   fncall_param_val);
      is_char_red_call
	= gimple_build_call (lbc_is_char_red_fndecl, 3, fncall_param_val,
			     size,
			     fold_convert_loc (location, ptr_type_node,
					       addr));
      gimple_set_location (is_char_red_call, location);
      /* debug_gimple_stmt (is_char_red_call);  */
      gsi_insert_before (iter, is_char_red_call, GSI_SAME_STMT);
      DEBUGLOG ("Done with is_char_red\n");
    }
  DEBUGLOG ("Exiting derefs\n");
}
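/* Illustration (names assumed): the check this version emits.  Taking the
   runtime entry point behind lbc_is_char_red_fndecl to be called
   is_char_red, the pass inserts before the load below, roughly,

       is_char_red (*(unsigned int *) p, sizeof (int), (void *) p);

   so the runtime can test whether any byte in [p, p + sizeof (int))
   lies in a redzone.  */

int
lbc_deref_example (int *p)
{
  return *p;
}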