/* Return true if the SSA names NAME1 and NAME2 may be coalesced into
   a single partition.  */

bool
gimple_can_coalesce_p (tree name1, tree name2)
{
  /* Coalescing requires the names to share a user-visible decl, or for
     neither to have one.  A VAR_DECL marked DECL_IGNORED_P counts as
     having no associated decl.  */
  tree var1 = SSA_NAME_VAR (name1);
  if (var1 && VAR_P (var1) && DECL_IGNORED_P (var1))
    var1 = NULL_TREE;
  tree var2 = SSA_NAME_VAR (name2);
  if (var2 && VAR_P (var2) && DECL_IGNORED_P (var2))
    var2 = NULL_TREE;
  if (var1 != var2)
    return false;

  /* Identical types can always be coalesced.  */
  tree t1 = TREE_TYPE (name1);
  tree t2 = TREE_TYPE (name2);
  if (t1 == t2)
    return true;

  /* Otherwise accept a canonical-type match: this allows coalescing
     when the types are fundamentally the same but just have different
     names.  Pointer types with different address spaces may share a
     canonical type; those are rejected by types_compatible_p.  */
  return (TYPE_CANONICAL (t1)
	  && TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2)
	  && types_compatible_p (t1, t2));
}
/* Mark the base object referenced by X as addressable, together with
   any artificial SSA_NAME standing for its partition.  */

void
mark_addressable (tree x)
{
  /* Walk down component references to the underlying base.  */
  for (; handled_component_p (x); x = TREE_OPERAND (x, 0))
    ;

  /* Look through a MEM_REF of the form *(&base).  */
  if (TREE_CODE (x) == MEM_REF
      && TREE_CODE (TREE_OPERAND (x, 0)) == ADDR_EXPR)
    x = TREE_OPERAND (TREE_OPERAND (x, 0), 0);

  /* Only variables, parameters and results can be marked.  */
  bool markable = (VAR_P (x)
		   || TREE_CODE (x) == PARM_DECL
		   || TREE_CODE (x) == RESULT_DECL);
  if (!markable)
    return;

  mark_addressable_1 (x);

  /* For a local, non-external variable, also mark the artificial
     SSA_NAME that points to the partition of X, if one was recorded
     in the decls_to_pointers map.  */
  if (VAR_P (x)
      && !DECL_EXTERNAL (x)
      && !TREE_STATIC (x)
      && cfun->gimple_df != NULL
      && cfun->gimple_df->decls_to_pointers != NULL)
    {
      tree *slot = cfun->gimple_df->decls_to_pointers->get (x);
      if (slot)
	mark_addressable_1 (*slot);
    }
}
/* Return true if DECL is a compiler-generated proxy variable standing
   for a capture inside a lambda function body.  */

bool
is_capture_proxy (tree decl)
{
  if (!VAR_P (decl))
    return false;
  /* Proxies carry a DECL_VALUE_EXPR pointing at the real capture.  */
  if (!DECL_HAS_VALUE_EXPR_P (decl))
    return false;
  /* Anonymous-union members also have value exprs but are not
     capture proxies.  */
  if (DECL_ANON_UNION_VAR_P (decl))
    return false;
  /* The proxy must live directly inside a lambda's function body.  */
  return LAMBDA_FUNCTION_P (DECL_CONTEXT (decl));
}
/* Make *VALUE invariant for repeated use: unless it is already an
   integer constant, parameter, or variable, capture it in a
   ptrdiff_t-typed temporary register variable.  */

static inline void
make_triplet_val_inv (tree *value)
{
  tree val = *value;
  bool already_simple = (TREE_CODE (val) == INTEGER_CST
			 || TREE_CODE (val) == PARM_DECL
			 || VAR_P (val));
  if (!already_simple)
    *value = get_temp_regvar (ptrdiff_type_node, val);
}
/* Return true if an unused-global warning is appropriate for DECL;
   false for the categories C++ deliberately exempts.  */

bool
cxx_warn_unused_global_decl (const_tree decl)
{
  bool exempt
    /* Inline functions may legitimately be unused in any given TU.  */
    = ((TREE_CODE (decl) == FUNCTION_DECL && DECL_DECLARED_INLINE_P (decl))
       /* Never warn about declarations coming from system headers.  */
       || DECL_IN_SYSTEM_HEADER (decl)
       /* Const variables take the place of #defines in C++.  */
       || (VAR_P (decl) && TREE_READONLY (decl)));
  return !exempt;
}
/* Return true if uses of DECL should be noted in the enclosing OpenMP
   context (class-typed entities without predetermined sharing).  */

static bool
omp_var_to_track (tree decl)
{
  tree type = TREE_TYPE (decl);
  /* Invisible reference parms: look at the referenced type.  */
  if (is_invisiref_parm (decl))
    type = TREE_TYPE (type);
  /* Strip array dimensions down to the element type.  */
  for (; TREE_CODE (type) == ARRAY_TYPE; type = TREE_TYPE (type))
    ;

  if (type == error_mark_node)
    return false;
  /* Only class types need tracking.  */
  if (!CLASS_TYPE_P (type))
    return false;
  /* Thread-local variables are per-thread already.  */
  if (VAR_P (decl) && DECL_THREAD_LOCAL_P (decl))
    return false;
  /* Track only decls whose sharing is not predetermined.  */
  return (cxx_omp_predetermined_sharing (decl)
	  == OMP_CLAUSE_DEFAULT_UNSPECIFIED);
}
/* Return the constant value of EXP for use in optimization, or EXP
   itself when no substitution is appropriate.  */

tree
decl_constant_value_for_optimization (tree exp)
{
  /* Only scalar variables are eligible, and only when optimizing.  */
  bool eligible = (optimize
		   && VAR_P (exp)
		   && TREE_CODE (TREE_TYPE (exp)) != ARRAY_TYPE
		   && DECL_MODE (exp) != BLKmode);
  if (!eligible)
    return exp;

  tree ret = decl_constant_value (exp);
  /* Avoid unwanted tree sharing between the initializer and current
     function's body where the tree can be modified e.g. by the
     gimplifier.  */
  if (ret != exp && TREE_STATIC (exp))
    ret = unshare_expr (ret);
  return ret;
}
/* Record every VAR_DECL on the chain VARS as a local declaration of
   function FN, skipping non-variables and externals.  */

void
record_vars_into (tree vars, tree fn)
{
  struct function *fun = DECL_STRUCT_FUNCTION (fn);

  for (tree var = vars; var; var = DECL_CHAIN (var))
    {
      /* BIND_EXPRs also chain function/type/constant declarations we
	 don't need to care about.  */
      if (!VAR_P (var))
	continue;
      /* Externals are not locals of FN; nothing to do.  */
      if (DECL_EXTERNAL (var))
	continue;
      add_local_decl (fun, var);
    }
}
/* Add one parameter/argument pair to the wrapper being built in WD.
   *VAL0 is the receiver expression for the value; ARG is the argument
   expression.  A PAREN_EXPR receiver indicates the value is passed by
   address, in which case *VAL0 is rewritten to the stripped operand
   and ARG is replaced by its address.  Updates WD's parms, argtypes
   and arglist chains in place.  */

static void
create_parm_list (struct wrapper_data *wd, tree *val0, tree arg)
{
  tree val = *val0;
  tree parm;

  /* Nothing to record for errors or a value that is its own arg.  */
  if (val == error_mark_node || val == arg)
    return;

  if (TREE_CODE (val) == PAREN_EXPR)
    {
      /* We should not reach here with a register receiver.  We may
	 see a register variable modified in the argument list.
	 Because register variables are worker-local we don't need to
	 work hard to support them in code that spawns.  */
      if (VAR_P (arg) && DECL_HARD_REGISTER (arg))
	{
	  error_at (EXPR_LOCATION (arg),
		    "explicit register variable %qD may not be modified in "
		    "spawn", arg);
	  arg = null_pointer_node;
	}
      else
	/* Pass the argument by address instead.  */
	arg = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (arg)), arg);

      /* Strip the PAREN_EXPR and publish the stripped receiver back
	 to the caller through *VAL0.  */
      val = TREE_OPERAND (val, 0);
      *val0 = val;
      gcc_assert (INDIRECT_REF_P (val));
      /* The parameter is the object the indirection refers to; note
	 STRIP_NOPS modifies PARM in place.  */
      parm = TREE_OPERAND (val, 0);
      STRIP_NOPS (parm);
    }
  else
    parm = val;

  /* Push the parameter and the matching argument/type onto the
     wrapper's chains (built in reverse order).  */
  TREE_CHAIN (parm) = wd->parms;
  wd->parms = parm;
  wd->argtypes = tree_cons (NULL_TREE, TREE_TYPE (parm), wd->argtypes);
  wd->arglist = tree_cons (NULL_TREE, arg, wd->arglist);
}
/* Build a THROW_EXPR for the expression EXP.  EXP == NULL_TREE means
   a rethrow of the current exception (lowered to __cxa_rethrow);
   otherwise the exception object is allocated, initialized from EXP,
   and handed to __cxa_throw.  Returns the THROW_EXPR, or
   error_mark_node on failure.  */

tree
build_throw (tree exp)
{
  if (exp == error_mark_node)
    return exp;

  /* In a template, just build the bare THROW_EXPR for later
     instantiation.  */
  if (processing_template_decl)
    {
      if (cfun)
	current_function_returns_abnormally = 1;
      exp = build_min (THROW_EXPR, void_type_node, exp);
      SET_EXPR_LOCATION (exp, input_location);
      return exp;
    }

  if (exp && null_node_p (exp))
    warning (0, "throwing NULL, which has integral, not pointer type");

  if (exp != NULL_TREE)
    {
      if (!is_admissible_throw_operand_or_catch_parameter (exp, true))
	return error_mark_node;
    }

  if (! doing_eh ())
    return error_mark_node;

  if (exp)
    {
      tree throw_type;
      tree temp_type;
      tree cleanup;
      tree object, ptr;
      tree tmp;
      tree allocate_expr;

      /* The CLEANUP_TYPE is the internal type of a destructor.  */
      if (!cleanup_type)
	{
	  tmp = build_function_type_list (void_type_node,
					  ptr_type_node, NULL_TREE);
	  cleanup_type = build_pointer_type (tmp);
	}

      /* Lazily declare __cxa_throw (and its TM replacement).  */
      if (!throw_fn)
	{
	  tree name = get_identifier ("__cxa_throw");
	  throw_fn = get_global_binding (name);
	  if (!throw_fn)
	    {
	      /* Declare void __cxa_throw (void*, void*, void (*)(void*)).  */
	      /* ??? Second argument is supposed to be "std::type_info*".  */
	      tmp = build_function_type_list (void_type_node,
					      ptr_type_node, ptr_type_node,
					      cleanup_type, NULL_TREE);
	      throw_fn = push_throw_library_fn (name, tmp);

	      if (flag_tm)
		{
		  tree itm_name = get_identifier ("_ITM_cxa_throw");
		  tree itm_fn = get_global_binding (itm_name);
		  if (!itm_fn)
		    itm_fn = push_throw_library_fn (itm_name, tmp);
		  apply_tm_attr (itm_fn, get_identifier ("transaction_pure"));
		  record_tm_replacement (throw_fn, itm_fn);
		}
	    }
	}

      /* [except.throw]

	 A throw-expression initializes a temporary object, the type
	 of which is determined by removing any top-level
	 cv-qualifiers from the static type of the operand of throw
	 and adjusting the type from "array of T" or "function return
	 T" to "pointer to T" or "pointer to function returning T"
	 respectively.  */
      temp_type = is_bitfield_expr_with_lowered_type (exp);
      if (!temp_type)
	temp_type = cv_unqualified (type_decays_to (TREE_TYPE (exp)));

      /* OK, this is kind of wacky.  The standard says that we call
	 terminate when the exception handling mechanism, after
	 completing evaluation of the expression to be thrown but
	 before the exception is caught (_except.throw_), calls a user
	 function that exits via an uncaught exception.

	 So we have to protect the actual initialization of the
	 exception object with terminate(), but evaluate the
	 expression first.  Since there could be temps in the
	 expression, we need to handle that, too.  We also expand
	 the call to __cxa_allocate_exception first (which doesn't
	 matter, since it can't throw).  */

      /* Allocate the space for the exception.  */
      allocate_expr = do_allocate_exception (temp_type);
      allocate_expr = get_target_expr (allocate_expr);
      ptr = TARGET_EXPR_SLOT (allocate_expr);
      /* Free the slot if initialization throws.  */
      TARGET_EXPR_CLEANUP (allocate_expr) = do_free_exception (ptr);
      CLEANUP_EH_ONLY (allocate_expr) = 1;

      object = build_nop (build_pointer_type (temp_type), ptr);
      object = cp_build_fold_indirect_ref (object);

      /* And initialize the exception object.  */
      if (CLASS_TYPE_P (temp_type))
	{
	  int flags = LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING;
	  vec<tree, va_gc> *exp_vec;
	  bool converted = false;

	  /* Under C++0x [12.8/16 class.copy], a thrown lvalue is
	     sometimes treated as an rvalue for the purposes of
	     overload resolution to favor move constructors over copy
	     constructors.  */
	  if (/* Must be a local, automatic variable.  */
	      VAR_P (exp)
	      && DECL_CONTEXT (exp) == current_function_decl
	      && ! TREE_STATIC (exp)
	      /* The variable must not have the `volatile'
		 qualifier.  */
	      && !(cp_type_quals (TREE_TYPE (exp)) & TYPE_QUAL_VOLATILE))
	    {
	      tree moved = move (exp);
	      exp_vec = make_tree_vector_single (moved);
	      moved = (build_special_member_call
		       (object, complete_ctor_identifier, &exp_vec,
			TREE_TYPE (object), flags|LOOKUP_PREFER_RVALUE,
			tf_none));
	      release_tree_vector (exp_vec);
	      if (moved != error_mark_node)
		{
		  exp = moved;
		  converted = true;
		}
	    }

	  /* Call the copy constructor.  */
	  if (!converted)
	    {
	      exp_vec = make_tree_vector_single (exp);
	      exp = (build_special_member_call
		     (object, complete_ctor_identifier, &exp_vec,
		      TREE_TYPE (object), flags, tf_warning_or_error));
	      release_tree_vector (exp_vec);
	    }

	  if (exp == error_mark_node)
	    {
	      error (" in thrown expression");
	      return error_mark_node;
	    }
	}
      else
	{
	  tmp = decay_conversion (exp, tf_warning_or_error);
	  if (tmp == error_mark_node)
	    return error_mark_node;
	  exp = build2 (INIT_EXPR, temp_type, object, tmp);
	}

      /* Mark any cleanups from the initialization as MUST_NOT_THROW, since
	 they are run after the exception object is initialized.  */
      cp_walk_tree_without_duplicates (&exp, wrap_cleanups_r, 0);

      /* Prepend the allocation.  */
      exp = build2 (COMPOUND_EXPR, TREE_TYPE (exp), allocate_expr, exp);

      /* Force all the cleanups to be evaluated here so that we don't have
	 to do them during unwinding.  */
      exp = build1 (CLEANUP_POINT_EXPR, void_type_node, exp);

      throw_type = build_eh_type_type (prepare_eh_type (TREE_TYPE (object)));

      cleanup = NULL_TREE;
      if (type_build_dtor_call (TREE_TYPE (object)))
	{
	  tree dtor_fn = lookup_fnfields (TYPE_BINFO (TREE_TYPE (object)),
					  complete_dtor_identifier, 0);
	  dtor_fn = BASELINK_FUNCTIONS (dtor_fn);
	  mark_used (dtor_fn);
	  if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (TREE_TYPE (object)))
	    {
	      cxx_mark_addressable (dtor_fn);
	      /* Pretend it's a normal function.  */
	      cleanup = build1 (ADDR_EXPR, cleanup_type, dtor_fn);
	    }
	}
      if (cleanup == NULL_TREE)
	cleanup = build_int_cst (cleanup_type, 0);

      /* ??? Indicate that this function call throws throw_type.  */
      tmp = cp_build_function_call_nary (throw_fn, tf_warning_or_error,
					 ptr, throw_type, cleanup,
					 NULL_TREE);

      /* Tack on the initialization stuff.  */
      exp = build2 (COMPOUND_EXPR, TREE_TYPE (tmp), exp, tmp);
    }
  else
    {
      /* Rethrow current exception.  */
      if (!rethrow_fn)
	{
	  tree name = get_identifier ("__cxa_rethrow");
	  rethrow_fn = get_global_binding (name);
	  if (!rethrow_fn)
	    /* Declare void __cxa_rethrow (void).  */
	    rethrow_fn = push_throw_library_fn
	      (name, build_function_type_list (void_type_node, NULL_TREE));

	  if (flag_tm)
	    apply_tm_attr (rethrow_fn, get_identifier ("transaction_pure"));
	}

      /* ??? Indicate that this function call allows exceptions of the
	 type of the enclosing catch block (if known).  */
      exp = cp_build_function_call_vec (rethrow_fn, NULL,
					tf_warning_or_error);
    }

  exp = build1 (THROW_EXPR, void_type_node, exp);
  SET_EXPR_LOCATION (exp, input_location);

  return exp;
}
/* Finish an OpenMP atomic directive at LOC.  CODE is the atomic kind
   (OMP_ATOMIC, OMP_ATOMIC_READ, or one of the capture variants);
   OPCODE is the update operation.  LHS/RHS are the update operands,
   V the capture target, LHS1/RHS1 the user-written duplicates used
   for diagnostics, SWAPPED whether the operands were reversed, and
   SEQ_CST whether seq_cst semantics were requested.  Returns the
   built expression or error_mark_node.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs, tree v,
		     tree lhs1, tree rhs1, bool swapped, bool seq_cst)
{
  tree x, type, addr, pre = NULL_TREE;

  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference
     it via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  /* An atomic read needs no update expression at all.  */
  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (swapped)
    {
      rhs = build_binary_op (loc, opcode, rhs, lhs, 1);
      opcode = NOP_EXPR;
    }
  /* Temporarily signal late-binary-op context around the build.  */
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      /* Peel off a leading SAVE_EXPR to re-attach at the end.  */
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1 && VAR_P (rhs1) && VAR_P (lhs) && rhs1 != lhs)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
      return error_mark_node;
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (lhs))
	{
	  if (lhs1 != lhs)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
	      return error_mark_node;
	    }
	}
      /* Store the captured value into V.  */
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != lhs)
	{
	  /* Evaluate rhs1's address for side effects only.  */
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  /* Re-attach the SAVE_EXPR peeled off above, if any.  */
  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
bool find_rank (location_t loc, tree orig_expr, tree expr, bool ignore_builtin_fn, size_t *rank) { tree ii_tree; size_t ii = 0, current_rank = 0; if (TREE_CODE (expr) == ARRAY_NOTATION_REF) { ii_tree = expr; while (ii_tree) { if (TREE_CODE (ii_tree) == ARRAY_NOTATION_REF) { current_rank++; ii_tree = ARRAY_NOTATION_ARRAY (ii_tree); } else if (handled_component_p (ii_tree) || INDIRECT_REF_P (ii_tree)) ii_tree = TREE_OPERAND (ii_tree, 0); else if (TREE_CODE (ii_tree) == PARM_DECL || VAR_P (ii_tree)) break; else gcc_unreachable (); } if (*rank == 0) /* In this case, all the expressions this function has encountered thus far have been scalars or expressions with zero rank. Please see header comment for examples of such expression. */ *rank = current_rank; else if (*rank != current_rank) { /* In this case, find rank is being recursed through a set of expression of the form A <OPERATION> B, where A and B both have array notations in them and the rank of A is not equal to rank of B. A simple example of such case is the following: X[:] + Y[:][:] */ *rank = current_rank; return false; } } else if (TREE_CODE (expr) == STATEMENT_LIST) { tree_stmt_iterator ii_tsi; for (ii_tsi = tsi_start (expr); !tsi_end_p (ii_tsi); tsi_next (&ii_tsi)) if (!find_rank (loc, orig_expr, *tsi_stmt_ptr (ii_tsi), ignore_builtin_fn, rank)) return false; } else { if (TREE_CODE (expr) == CALL_EXPR) { tree func_name = CALL_EXPR_FN (expr); tree prev_arg = NULL_TREE, arg; call_expr_arg_iterator iter; size_t prev_rank = 0; if (TREE_CODE (func_name) == ADDR_EXPR) if (!ignore_builtin_fn) if (is_cilkplus_reduce_builtin (func_name)) /* If it is a built-in function, then we know it returns a scalar. 
*/ return true; if (!find_rank (loc, orig_expr, func_name, ignore_builtin_fn, rank)) return false; FOR_EACH_CALL_EXPR_ARG (arg, iter, expr) { if (!find_rank (loc, orig_expr, arg, ignore_builtin_fn, rank)) { if (prev_arg && EXPR_HAS_LOCATION (prev_arg) && prev_rank != *rank) error_at (EXPR_LOCATION (prev_arg), "rank mismatch between %qE and %qE", prev_arg, arg); else if (prev_arg && prev_rank != *rank) /* Here the original expression is printed as a "heads-up" to the programmer. This is because since there is no location information for the offending argument, the error could be in some internally generated code that is not visible for the programmer. Thus, the correct fix may lie in the original expression. */ error_at (loc, "rank mismatch in expression %qE", orig_expr); return false; } prev_arg = arg; prev_rank = *rank; } } else {
/* Return -1 if dwarf ATTR shouldn't be added for DECL, or the
   attribute value otherwise.  */

int
cp_decl_dwarf_attribute (const_tree decl, int attr)
{
  if (decl == NULL_TREE)
    return -1;

  bool fn_p = TREE_CODE (decl) == FUNCTION_DECL;
  /* FE-specific flags below are only meaningful when lang-specific
     data is present on the template-stripped decl.  */
  bool fn_lang_p = fn_p && DECL_LANG_SPECIFIC (STRIP_TEMPLATE (decl));

  switch (attr)
    {
    case DW_AT_explicit:
      if (fn_lang_p && DECL_NONCONVERTING_P (decl))
	return 1;
      break;

    case DW_AT_deleted:
      if (fn_lang_p && DECL_DELETED_FN (decl))
	return 1;
      break;

    case DW_AT_defaulted:
      if (fn_lang_p && DECL_DEFAULTED_FN (decl))
	{
	  if (DECL_DEFAULTED_IN_CLASS_P (decl))
	    return DW_DEFAULTED_in_class;
	  if (DECL_DEFAULTED_OUTSIDE_CLASS_P (decl))
	    return DW_DEFAULTED_out_of_class;
	}
      break;

    case DW_AT_const_expr:
      if (VAR_OR_FUNCTION_DECL_P (decl) && DECL_DECLARED_CONSTEXPR_P (decl))
	return 1;
      break;

    case DW_AT_reference:
      /* &-qualified non-static member function.  */
      if (fn_p
	  && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)
	  && FUNCTION_REF_QUALIFIED (TREE_TYPE (decl))
	  && !FUNCTION_RVALUE_QUALIFIED (TREE_TYPE (decl)))
	return 1;
      break;

    case DW_AT_rvalue_reference:
      /* &&-qualified non-static member function.  */
      if (fn_p
	  && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)
	  && FUNCTION_REF_QUALIFIED (TREE_TYPE (decl))
	  && FUNCTION_RVALUE_QUALIFIED (TREE_TYPE (decl)))
	return 1;
      break;

    case DW_AT_inline:
      if (VAR_P (decl) && DECL_INLINE_VAR_P (decl))
	return (DECL_VAR_DECLARED_INLINE_P (decl)
		? DW_INL_declared_inlined
		: DW_INL_inlined);
      break;

    case DW_AT_export_symbols:
      /* Inline and (DWARF 5) anonymous namespaces export their
	 symbols to the enclosing scope.  */
      if (TREE_CODE (decl) == NAMESPACE_DECL
	  && (DECL_NAMESPACE_INLINE_P (decl)
	      || (DECL_NAME (decl) == NULL_TREE && dwarf_version >= 5)))
	return 1;
      break;

    default:
      break;
    }

  return -1;
}
/* Stream out the gimple statement STMT to output block OB as part of
   LTO bytecode emission.  The emission order here is the wire format:
   tag, bit-packed tuple header, location, lexical block, operands,
   then any histogram value.  */

static void
output_gimple_stmt (struct output_block *ob, gimple *stmt)
{
  unsigned i;
  enum gimple_code code;
  enum LTO_tags tag;
  struct bitpack_d bp;
  histogram_value hist;

  /* Emit identifying tag.  */
  code = gimple_code (stmt);
  tag = lto_gimple_code_to_tag (code);
  streamer_write_record_start (ob, tag);

  /* Emit the tuple header.  */
  bp = bitpack_create (ob->main_stream);
  bp_pack_var_len_unsigned (&bp, gimple_num_ops (stmt));
  bp_pack_value (&bp, gimple_no_warning_p (stmt), 1);
  if (is_gimple_assign (stmt))
    bp_pack_value (&bp,
		   gimple_assign_nontemporal_move_p (
		     as_a <gassign *> (stmt)),
		   1);
  bp_pack_value (&bp, gimple_has_volatile_ops (stmt), 1);
  hist = gimple_histogram_value (cfun, stmt);
  /* A flag now; the histogram itself is streamed at the end.  */
  bp_pack_value (&bp, hist != NULL, 1);
  bp_pack_var_len_unsigned (&bp, stmt->subcode);

  /* Emit location information for the statement.  */
  stream_output_location (ob, &bp, LOCATION_LOCUS (gimple_location (stmt)));
  streamer_write_bitpack (&bp);

  /* Emit the lexical block holding STMT.  */
  stream_write_tree (ob, gimple_block (stmt), true);

  /* Emit the operands.  */
  switch (gimple_code (stmt))
    {
    case GIMPLE_RESX:
      streamer_write_hwi (ob, gimple_resx_region (as_a <gresx *> (stmt)));
      break;

    case GIMPLE_EH_MUST_NOT_THROW:
      stream_write_tree (ob,
			 gimple_eh_must_not_throw_fndecl (
			   as_a <geh_mnt *> (stmt)),
			 true);
      break;

    case GIMPLE_EH_DISPATCH:
      streamer_write_hwi (ob,
			  gimple_eh_dispatch_region (
			    as_a <geh_dispatch *> (stmt)));
      break;

    case GIMPLE_ASM:
      {
	gasm *asm_stmt = as_a <gasm *> (stmt);
	streamer_write_uhwi (ob, gimple_asm_ninputs (asm_stmt));
	streamer_write_uhwi (ob, gimple_asm_noutputs (asm_stmt));
	streamer_write_uhwi (ob, gimple_asm_nclobbers (asm_stmt));
	streamer_write_uhwi (ob, gimple_asm_nlabels (asm_stmt));
	streamer_write_string (ob, ob->main_stream,
			       gimple_asm_string (asm_stmt), true);
      }
      /* Fallthru  */

    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
    case GIMPLE_RETURN:
    case GIMPLE_SWITCH:
    case GIMPLE_LABEL:
    case GIMPLE_COND:
    case GIMPLE_GOTO:
    case GIMPLE_DEBUG:
      for (i = 0; i < gimple_num_ops (stmt); i++)
	{
	  tree op = gimple_op (stmt, i);
	  tree *basep = NULL;
	  /* Wrap all uses of non-automatic variables inside MEM_REFs
	     so that we do not have to deal with type mismatches on
	     merged symbols during IL read in.  The first operand
	     of GIMPLE_DEBUG must be a decl, not MEM_REF, though.  */
	  if (op && (i || !is_gimple_debug (stmt)))
	    {
	      basep = &op;
	      if (TREE_CODE (*basep) == ADDR_EXPR)
		basep = &TREE_OPERAND (*basep, 0);
	      while (handled_component_p (*basep))
		basep = &TREE_OPERAND (*basep, 0);
	      if (VAR_P (*basep)
		  && !auto_var_in_fn_p (*basep, current_function_decl)
		  && !DECL_REGISTER (*basep))
		{
		  bool volatilep = TREE_THIS_VOLATILE (*basep);
		  tree ptrtype = build_pointer_type (TREE_TYPE (*basep));
		  /* Temporarily rewrite the base in place; undone
		     below after streaming.  */
		  *basep = build2 (MEM_REF, TREE_TYPE (*basep),
				   build1 (ADDR_EXPR, ptrtype, *basep),
				   build_int_cst (ptrtype, 0));
		  TREE_THIS_VOLATILE (*basep) = volatilep;
		}
	      else
		basep = NULL;
	    }
	  stream_write_tree (ob, op, true);
	  /* Restore the original base if we wrapped it inside a MEM_REF.  */
	  if (basep)
	    *basep = TREE_OPERAND (TREE_OPERAND (*basep, 0), 0);
	}
      if (is_gimple_call (stmt))
	{
	  if (gimple_call_internal_p (stmt))
	    streamer_write_enum (ob->main_stream, internal_fn,
				 IFN_LAST, gimple_call_internal_fn (stmt));
	  else
	    stream_write_tree (ob, gimple_call_fntype (stmt), true);
	}
      break;

    case GIMPLE_NOP:
    case GIMPLE_PREDICT:
      break;

    case GIMPLE_TRANSACTION:
      {
	gtransaction *txn = as_a <gtransaction *> (stmt);
	/* The body has been lowered away by this point.  */
	gcc_assert (gimple_transaction_body (txn) == NULL);
	stream_write_tree (ob, gimple_transaction_label_norm (txn), true);
	stream_write_tree (ob, gimple_transaction_label_uninst (txn), true);
	stream_write_tree (ob, gimple_transaction_label_over (txn), true);
      }
      break;

    default:
      gcc_unreachable ();
    }
  if (hist)
    stream_out_histogram_value (ob, hist);
}
/* Process the list ATTRIBUTES and install the applicable ones on
   *NODE (a decl or type, possibly replaced in place).  FLAGS is a
   mask of ATTR_FLAG_* values controlling how strictly attributes are
   matched.  Returns a list of attributes that could not be applied
   here and should be retried on a later entity.  */

tree
decl_attributes (tree *node, tree attributes, int flags)
{
  tree a;
  tree returned_attrs = NULL_TREE;

  if (TREE_TYPE (*node) == error_mark_node || attributes == error_mark_node)
    return NULL_TREE;

  if (!attributes_initialized)
    init_attributes ();

  /* If this is a function and the user used #pragma GCC optimize, add the
     options to the attribute((optimize(...))) list.  */
  if (TREE_CODE (*node) == FUNCTION_DECL && current_optimize_pragma)
    {
      tree cur_attr = lookup_attribute ("optimize", attributes);
      tree opts = copy_list (current_optimize_pragma);

      if (! cur_attr)
	attributes
	  = tree_cons (get_identifier ("optimize"), opts, attributes);
      else
	TREE_VALUE (cur_attr) = chainon (opts, TREE_VALUE (cur_attr));
    }

  if (TREE_CODE (*node) == FUNCTION_DECL
      && optimization_current_node != optimization_default_node
      && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (*node))
    DECL_FUNCTION_SPECIFIC_OPTIMIZATION (*node) = optimization_current_node;

  /* If this is a function and the user used #pragma GCC target, add the
     options to the attribute((target(...))) list.  */
  if (TREE_CODE (*node) == FUNCTION_DECL
      && current_target_pragma
      && targetm.target_option.valid_attribute_p (*node, NULL_TREE,
						  current_target_pragma, 0))
    {
      tree cur_attr = lookup_attribute ("target", attributes);
      tree opts = copy_list (current_target_pragma);

      if (! cur_attr)
	attributes = tree_cons (get_identifier ("target"), opts, attributes);
      else
	TREE_VALUE (cur_attr) = chainon (opts, TREE_VALUE (cur_attr));
    }

  /* A "naked" function attribute implies "noinline" and "noclone" for
     those targets that support it.  */
  if (TREE_CODE (*node) == FUNCTION_DECL
      && attributes
      && lookup_attribute_spec (get_identifier ("naked"))
      && lookup_attribute ("naked", attributes) != NULL)
    {
      if (lookup_attribute ("noinline", attributes) == NULL)
	attributes = tree_cons (get_identifier ("noinline"), NULL, attributes);

      if (lookup_attribute ("noclone", attributes) == NULL)
	attributes = tree_cons (get_identifier ("noclone"),  NULL, attributes);
    }

  /* Give the target a chance to add its own attributes first.  */
  targetm.insert_attributes (*node, &attributes);

  for (a = attributes; a; a = TREE_CHAIN (a))
    {
      tree ns = get_attribute_namespace (a);
      tree name = get_attribute_name (a);
      tree args = TREE_VALUE (a);
      /* ANODE may be redirected below to the decl's type, or to a
	 pulled-out pointed-to function type.  */
      tree *anode = node;
      const struct attribute_spec *spec
	= lookup_scoped_attribute_spec (ns, name);
      bool no_add_attrs = 0;
      int fn_ptr_quals = 0;
      tree fn_ptr_tmp = NULL_TREE;

      if (spec == NULL)
	{
	  if (!(flags & (int) ATTR_FLAG_BUILT_IN))
	    {
	      if (ns == NULL_TREE || !cxx11_attribute_p (a))
		warning (OPT_Wattributes, "%qE attribute directive ignored",
			 name);
	      else
		warning (OPT_Wattributes,
			 "%<%E::%E%> scoped attribute directive ignored",
			 ns, name);
	    }
	  continue;
	}
      else if (list_length (args) < spec->min_length
	       || (spec->max_length >= 0
		   && list_length (args) > spec->max_length))
	{
	  error ("wrong number of arguments specified for %qE attribute",
		 name);
	  continue;
	}
      gcc_assert (is_attribute_p (spec->name, name));

      if (TYPE_P (*node)
	  && cxx11_attribute_p (a)
	  && !(flags & ATTR_FLAG_TYPE_IN_PLACE))
	{
	  /* This is a c++11 attribute that appertains to a
	     type-specifier, outside of the definition of, a class
	     type.  Ignore it.  */
	  if (warning (OPT_Wattributes, "attribute ignored"))
	    inform (input_location,
		    "an attribute that appertains to a type-specifier "
		    "is ignored");
	  continue;
	}

      if (spec->decl_required && !DECL_P (*anode))
	{
	  if (flags & ((int) ATTR_FLAG_DECL_NEXT
		       | (int) ATTR_FLAG_FUNCTION_NEXT
		       | (int) ATTR_FLAG_ARRAY_NEXT))
	    {
	      /* Pass on this attribute to be tried again.  */
	      returned_attrs = tree_cons (name, args, returned_attrs);
	      continue;
	    }
	  else
	    {
	      warning (OPT_Wattributes, "%qE attribute does not apply to types",
		       name);
	      continue;
	    }
	}

      /* If we require a type, but were passed a decl, set up to make a
	 new type and update the one in the decl.  ATTR_FLAG_TYPE_IN_PLACE
	 would have applied if we'd been passed a type, but we cannot modify
	 the decl's type in place here.  */
      if (spec->type_required && DECL_P (*anode))
	{
	  anode = &TREE_TYPE (*anode);
	  flags &= ~(int) ATTR_FLAG_TYPE_IN_PLACE;
	}

      if (spec->function_type_required
	  && TREE_CODE (*anode) != FUNCTION_TYPE
	  && TREE_CODE (*anode) != METHOD_TYPE)
	{
	  if (TREE_CODE (*anode) == POINTER_TYPE
	      && (TREE_CODE (TREE_TYPE (*anode)) == FUNCTION_TYPE
		  || TREE_CODE (TREE_TYPE (*anode)) == METHOD_TYPE))
	    {
	      /* OK, this is a bit convoluted.  We can't just make a copy
		 of the pointer type and modify its TREE_TYPE, because if
		 we change the attributes of the target type the pointer
		 type needs to have a different TYPE_MAIN_VARIANT.  So we
		 pull out the target type now, frob it as appropriate, and
		 rebuild the pointer type later.

		 This would all be simpler if attributes were part of the
		 declarator, grumble grumble.  */
	      fn_ptr_tmp = TREE_TYPE (*anode);
	      fn_ptr_quals = TYPE_QUALS (*anode);
	      anode = &fn_ptr_tmp;
	      flags &= ~(int) ATTR_FLAG_TYPE_IN_PLACE;
	    }
	  else if (flags & (int) ATTR_FLAG_FUNCTION_NEXT)
	    {
	      /* Pass on this attribute to be tried again.  */
	      returned_attrs = tree_cons (name, args, returned_attrs);
	      continue;
	    }

	  if (TREE_CODE (*anode) != FUNCTION_TYPE
	      && TREE_CODE (*anode) != METHOD_TYPE)
	    {
	      warning (OPT_Wattributes,
		       "%qE attribute only applies to function types",
		       name);
	      continue;
	    }
	}

      if (TYPE_P (*anode)
	  && (flags & (int) ATTR_FLAG_TYPE_IN_PLACE)
	  && TYPE_SIZE (*anode) != NULL_TREE)
	{
	  warning (OPT_Wattributes, "type attributes ignored after type is already defined");
	  continue;
	}

      if (spec->handler != NULL)
	{
	  int cxx11_flag = cxx11_attribute_p (a) ? ATTR_FLAG_CXX11 : 0;

	  /* The handler may veto installation via NO_ADD_ATTRS and may
	     itself return attributes to pass on.  */
	  returned_attrs = chainon ((*spec->handler) (anode, name, args,
						      flags|cxx11_flag,
						      &no_add_attrs),
				    returned_attrs);
	}

      /* Layout the decl in case anything changed.  */
      if (spec->type_required && DECL_P (*node)
	  && (VAR_P (*node)
	      || TREE_CODE (*node) == PARM_DECL
	      || TREE_CODE (*node) == RESULT_DECL))
	relayout_decl (*node);

      if (!no_add_attrs)
	{
	  tree old_attrs;
	  tree a;

	  if (DECL_P (*anode))
	    old_attrs = DECL_ATTRIBUTES (*anode);
	  else
	    old_attrs = TYPE_ATTRIBUTES (*anode);

	  /* Skip installation if an identical attribute is already
	     present.  */
	  for (a = lookup_attribute (spec->name, old_attrs);
	       a != NULL_TREE;
	       a = lookup_attribute (spec->name, TREE_CHAIN (a)))
	    {
	      if (simple_cst_equal (TREE_VALUE (a), args) == 1)
		break;
	    }

	  if (a == NULL_TREE)
	    {
	      /* This attribute isn't already in the list.  */
	      if (DECL_P (*anode))
		DECL_ATTRIBUTES (*anode) = tree_cons (name, args, old_attrs);
	      else if (flags & (int) ATTR_FLAG_TYPE_IN_PLACE)
		{
		  TYPE_ATTRIBUTES (*anode) = tree_cons (name, args, old_attrs);
		  /* If this is the main variant, also push the attributes
		     out to the other variants.  */
		  if (*anode == TYPE_MAIN_VARIANT (*anode))
		    {
		      tree variant;
		      for (variant = *anode; variant;
			   variant = TYPE_NEXT_VARIANT (variant))
			{
			  if (TYPE_ATTRIBUTES (variant) == old_attrs)
			    TYPE_ATTRIBUTES (variant)
			      = TYPE_ATTRIBUTES (*anode);
			  else if (!lookup_attribute
				   (spec->name, TYPE_ATTRIBUTES (variant)))
			    TYPE_ATTRIBUTES (variant) = tree_cons
			      (name, args, TYPE_ATTRIBUTES (variant));
			}
		    }
		}
	      else
		*anode = build_type_attribute_variant (*anode,
						       tree_cons (name, args,
								  old_attrs));
	    }
	}

      if (fn_ptr_tmp)
	{
	  /* Rebuild the function pointer type and put it in the
	     appropriate place.  */
	  fn_ptr_tmp = build_pointer_type (fn_ptr_tmp);
	  if (fn_ptr_quals)
	    fn_ptr_tmp = build_qualified_type (fn_ptr_tmp, fn_ptr_quals);
	  if (DECL_P (*node))
	    TREE_TYPE (*node) = fn_ptr_tmp;
	  else
	    {
	      gcc_assert (TREE_CODE (*node) == POINTER_TYPE);
	      *node = fn_ptr_tmp;
	    }
	}
    }

  return returned_attrs;
}
/* Walker callback (for cp_walk_tree) that lowers C++-specific trees toward
   GENERIC.  STMT_P points at the tree currently being visited; clearing
   *WALK_SUBTREES stops recursion into its operands.  DATA is the
   struct cp_genericize_data carrying the visited-node set (p_set), the
   BIND_EXPR stack, and the innermost OpenMP task/parallel context
   (omp_ctx).  Always returns NULL / NULL_TREE, so the enclosing walk
   never terminates early.  May rewrite *STMT_P in place and may recurse
   via cp_walk_tree on selected sub-trees it handles itself.  */

static tree
cp_genericize_r (tree *stmt_p, int *walk_subtrees, void *data)
{
  tree stmt = *stmt_p;
  struct cp_genericize_data *wtd = (struct cp_genericize_data *) data;
  struct pointer_set_t *p_set = wtd->p_set;

  /* If in an OpenMP context, note var uses.  */
  if (__builtin_expect (wtd->omp_ctx != NULL, 0)
      && (VAR_P (stmt)
	  || TREE_CODE (stmt) == PARM_DECL
	  || TREE_CODE (stmt) == RESULT_DECL)
      && omp_var_to_track (stmt))
    omp_cxx_notice_variable (wtd->omp_ctx, stmt);

  /* Replace uses of an invisible-reference parm with a dereference.
     NOTE(review): presumably is_invisiref_parm identifies parms passed by
     invisible reference (class types passed by hidden pointer) — confirm
     against its definition elsewhere in this file.  */
  if (is_invisiref_parm (stmt)
      /* Don't dereference parms in a thunk, pass the references through.  */
      && !(DECL_THUNK_P (current_function_decl)
	   && TREE_CODE (stmt) == PARM_DECL))
    {
      *stmt_p = convert_from_reference (stmt);
      *walk_subtrees = 0;
      return NULL;
    }

  /* Map block scope extern declarations to visible declarations with the
     same name and type in outer scopes if any.  */
  if (cp_function_chain->extern_decl_map
      && VAR_OR_FUNCTION_DECL_P (stmt)
      && DECL_EXTERNAL (stmt))
    {
      struct cxx_int_tree_map *h, in;
      /* The map is keyed by DECL_UID.  */
      in.uid = DECL_UID (stmt);
      h = (struct cxx_int_tree_map *)
	  htab_find_with_hash (cp_function_chain->extern_decl_map,
			       &in, in.uid);
      if (h)
	{
	  *stmt_p = h->to;
	  *walk_subtrees = 0;
	  return NULL;
	}
    }

  /* Other than invisiref parms, don't walk the same tree twice.
     This check deliberately comes after the rewrites above, which must
     apply at every occurrence.  */
  if (pointer_set_contains (p_set, stmt))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Taking the address of an invisiref parm yields the parm itself
     (already a reference), converted to the expected pointer type.  */
  if (TREE_CODE (stmt) == ADDR_EXPR
      && is_invisiref_parm (TREE_OPERAND (stmt, 0)))
    {
      /* If in an OpenMP context, note var uses.  */
      if (__builtin_expect (wtd->omp_ctx != NULL, 0)
	  && omp_var_to_track (TREE_OPERAND (stmt, 0)))
	omp_cxx_notice_variable (wtd->omp_ctx, TREE_OPERAND (stmt, 0));
      *stmt_p = convert (TREE_TYPE (stmt), TREE_OPERAND (stmt, 0));
      *walk_subtrees = 0;
    }
  else if (TREE_CODE (stmt) == RETURN_EXPR
	   && TREE_OPERAND (stmt, 0)
	   && is_invisiref_parm (TREE_OPERAND (stmt, 0)))
    /* Don't dereference an invisiref RESULT_DECL inside a RETURN_EXPR.  */
    *walk_subtrees = 0;
  else if (TREE_CODE (stmt) == OMP_CLAUSE)
    switch (OMP_CLAUSE_CODE (stmt))
      {
      case OMP_CLAUSE_LASTPRIVATE:
	/* Don't dereference an invisiref in OpenMP clauses.  */
	if (is_invisiref_parm (OMP_CLAUSE_DECL (stmt)))
	  {
	    *walk_subtrees = 0;
	    /* The clause's helper statement still needs genericizing,
	       so walk it explicitly.  */
	    if (OMP_CLAUSE_LASTPRIVATE_STMT (stmt))
	      cp_walk_tree (&OMP_CLAUSE_LASTPRIVATE_STMT (stmt),
			    cp_genericize_r, data, NULL);
	  }
	break;
      case OMP_CLAUSE_PRIVATE:
	/* Don't dereference an invisiref in OpenMP clauses.  */
	if (is_invisiref_parm (OMP_CLAUSE_DECL (stmt)))
	  *walk_subtrees = 0;
	else if (wtd->omp_ctx != NULL)
	  {
	    /* Private clause doesn't cause any references to the
	       var in outer contexts, avoid calling
	       omp_cxx_notice_variable for it.  */
	    struct cp_genericize_omp_taskreg *old = wtd->omp_ctx;
	    wtd->omp_ctx = NULL;
	    cp_walk_tree (&OMP_CLAUSE_DECL (stmt), cp_genericize_r,
			  data, NULL);
	    wtd->omp_ctx = old;
	    *walk_subtrees = 0;
	  }
	break;
      case OMP_CLAUSE_SHARED:
      case OMP_CLAUSE_FIRSTPRIVATE:
      case OMP_CLAUSE_COPYIN:
      case OMP_CLAUSE_COPYPRIVATE:
	/* Don't dereference an invisiref in OpenMP clauses.  */
	if (is_invisiref_parm (OMP_CLAUSE_DECL (stmt)))
	  *walk_subtrees = 0;
	break;
      case OMP_CLAUSE_REDUCTION:
	/* Don't dereference an invisiref in reduction clause's
	   OMP_CLAUSE_DECL either.  OMP_CLAUSE_REDUCTION_{INIT,MERGE}
	   still needs to be genericized.  */
	if (is_invisiref_parm (OMP_CLAUSE_DECL (stmt)))
	  {
	    *walk_subtrees = 0;
	    if (OMP_CLAUSE_REDUCTION_INIT (stmt))
	      cp_walk_tree (&OMP_CLAUSE_REDUCTION_INIT (stmt),
			    cp_genericize_r, data, NULL);
	    if (OMP_CLAUSE_REDUCTION_MERGE (stmt))
	      cp_walk_tree (&OMP_CLAUSE_REDUCTION_MERGE (stmt),
			    cp_genericize_r, data, NULL);
	  }
	break;
      default:
	break;
      }
  else if (IS_TYPE_OR_DECL_P (stmt))
    *walk_subtrees = 0;

  /* Due to the way voidify_wrapper_expr is written, we don't get a chance
     to lower this construct before scanning it, so we need to lower these
     before doing anything else.  */
  else if (TREE_CODE (stmt) == CLEANUP_STMT)
    *stmt_p = build2_loc (EXPR_LOCATION (stmt),
			  CLEANUP_EH_ONLY (stmt) ? TRY_CATCH_EXPR
						 : TRY_FINALLY_EXPR,
			  void_type_node,
			  CLEANUP_BODY (stmt),
			  CLEANUP_EXPR (stmt));

  else if (TREE_CODE (stmt) == IF_STMT)
    {
      genericize_if_stmt (stmt_p);
      /* *stmt_p has changed, tail recurse to handle it again.  */
      return cp_genericize_r (stmt_p, walk_subtrees, data);
    }

  /* COND_EXPR might have incompatible types in branches if one or both
     arms are bitfields.  Fix it up now.  */
  else if (TREE_CODE (stmt) == COND_EXPR)
    {
      tree type_left
	= (TREE_OPERAND (stmt, 1)
	   ? is_bitfield_expr_with_lowered_type (TREE_OPERAND (stmt, 1))
	   : NULL_TREE);
      tree type_right
	= (TREE_OPERAND (stmt, 2)
	   ? is_bitfield_expr_with_lowered_type (TREE_OPERAND (stmt, 2))
	   : NULL_TREE);
      if (type_left
	  && !useless_type_conversion_p (TREE_TYPE (stmt),
					 TREE_TYPE (TREE_OPERAND (stmt, 1))))
	{
	  TREE_OPERAND (stmt, 1)
	    = fold_convert (type_left, TREE_OPERAND (stmt, 1));
	  /* The lowered bitfield type must be compatible with the
	     COND_EXPR's own type.  */
	  gcc_assert (useless_type_conversion_p (TREE_TYPE (stmt),
						 type_left));
	}
      if (type_right
	  && !useless_type_conversion_p (TREE_TYPE (stmt),
					 TREE_TYPE (TREE_OPERAND (stmt, 2))))
	{
	  TREE_OPERAND (stmt, 2)
	    = fold_convert (type_right, TREE_OPERAND (stmt, 2));
	  gcc_assert (useless_type_conversion_p (TREE_TYPE (stmt),
						 type_right));
	}
    }
  else if (TREE_CODE (stmt) == BIND_EXPR)
    {
      if (__builtin_expect (wtd->omp_ctx != NULL, 0))
	{
	  /* Record every trackable local of this BIND_EXPR in the current
	     OpenMP context: statics default to shared, others to private.  */
	  tree decl;
	  for (decl = BIND_EXPR_VARS (stmt); decl; decl = DECL_CHAIN (decl))
	    if (VAR_P (decl)
		&& !DECL_EXTERNAL (decl)
		&& omp_var_to_track (decl))
	      {
		splay_tree_node n
		  = splay_tree_lookup (wtd->omp_ctx->variables,
				       (splay_tree_key) decl);
		if (n == NULL)
		  splay_tree_insert (wtd->omp_ctx->variables,
				     (splay_tree_key) decl,
				     TREE_STATIC (decl)
				     ? OMP_CLAUSE_DEFAULT_SHARED
				     : OMP_CLAUSE_DEFAULT_PRIVATE);
	      }
	}
      /* Keep the BIND_EXPR stack current while walking the body so the
	 USING_STMT case below can find the innermost enclosing BLOCK.  */
      wtd->bind_expr_stack.safe_push (stmt);
      cp_walk_tree (&BIND_EXPR_BODY (stmt),
		    cp_genericize_r, data, NULL);
      wtd->bind_expr_stack.pop ();
    }
  else if (TREE_CODE (stmt) == USING_STMT)
    {
      tree block = NULL_TREE;

      /* Get the innermost enclosing GIMPLE_BIND that has a non NULL
	 BLOCK, and append an IMPORTED_DECL to its BLOCK_VARS
	 chained list.  */
      if (wtd->bind_expr_stack.exists ())
	{
	  int i;
	  for (i = wtd->bind_expr_stack.length () - 1; i >= 0; i--)
	    if ((block = BIND_EXPR_BLOCK (wtd->bind_expr_stack[i])))
	      break;
	}
      if (block)
	{
	  tree using_directive;
	  gcc_assert (TREE_OPERAND (stmt, 0));

	  using_directive = make_node (IMPORTED_DECL);
	  TREE_TYPE (using_directive) = void_type_node;

	  IMPORTED_DECL_ASSOCIATED_DECL (using_directive)
	    = TREE_OPERAND (stmt, 0);
	  /* Prepend the IMPORTED_DECL to the block's variable chain.  */
	  DECL_CHAIN (using_directive) = BLOCK_VARS (block);
	  BLOCK_VARS (block) = using_directive;
	}
      /* The USING_STMT won't appear in GENERIC.  */
      *stmt_p = build1 (NOP_EXPR, void_type_node, integer_zero_node);
      *walk_subtrees = 0;
    }
  else if (TREE_CODE (stmt) == DECL_EXPR
	   && TREE_CODE (DECL_EXPR_DECL (stmt)) == USING_DECL)
    {
      /* Using decls inside DECL_EXPRs are just dropped on the floor.  */
      *stmt_p = build1 (NOP_EXPR, void_type_node, integer_zero_node);
      *walk_subtrees = 0;
    }
  else if (TREE_CODE (stmt) == OMP_PARALLEL || TREE_CODE (stmt) == OMP_TASK)
    {
      struct cp_genericize_omp_taskreg omp_ctx;
      tree c, decl;
      splay_tree_node n;

      *walk_subtrees = 0;
      /* Walk the clauses with the OUTER context still current, then push
	 a fresh context (chained to the outer one) for the body.  */
      cp_walk_tree (&OMP_CLAUSES (stmt), cp_genericize_r, data, NULL);
      omp_ctx.is_parallel = TREE_CODE (stmt) == OMP_PARALLEL;
      omp_ctx.default_shared = omp_ctx.is_parallel;
      omp_ctx.outer = wtd->omp_ctx;
      omp_ctx.variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0);
      wtd->omp_ctx = &omp_ctx;
      /* Seed the new context's variable table from the explicit
	 data-sharing clauses.  */
      for (c = OMP_CLAUSES (stmt); c; c = OMP_CLAUSE_CHAIN (c))
	switch (OMP_CLAUSE_CODE (c))
	  {
	  case OMP_CLAUSE_SHARED:
	  case OMP_CLAUSE_PRIVATE:
	  case OMP_CLAUSE_FIRSTPRIVATE:
	  case OMP_CLAUSE_LASTPRIVATE:
	    decl = OMP_CLAUSE_DECL (c);
	    if (decl == error_mark_node || !omp_var_to_track (decl))
	      break;
	    n = splay_tree_lookup (omp_ctx.variables, (splay_tree_key) decl);
	    if (n != NULL)
	      break;
	    splay_tree_insert (omp_ctx.variables, (splay_tree_key) decl,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
			       ? OMP_CLAUSE_DEFAULT_SHARED
			       : OMP_CLAUSE_DEFAULT_PRIVATE);
	    /* All non-private clauses imply a reference to the variable
	       in the enclosing context as well.  */
	    if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE
		&& omp_ctx.outer)
	      omp_cxx_notice_variable (omp_ctx.outer, decl);
	    break;
	  case OMP_CLAUSE_DEFAULT:
	    if (OMP_CLAUSE_DEFAULT_KIND (c) == OMP_CLAUSE_DEFAULT_SHARED)
	      omp_ctx.default_shared = true;
	    /* FALLTHRU */
	  default:
	    break;
	  }
      cp_walk_tree (&OMP_BODY (stmt), cp_genericize_r, data, NULL);
      /* Pop the context and release its variable table.  */
      wtd->omp_ctx = omp_ctx.outer;
      splay_tree_delete (omp_ctx.variables);
    }
  else if (TREE_CODE (stmt) == CONVERT_EXPR)
    gcc_assert (!CONVERT_EXPR_VBASE_PATH (stmt));
  else if (TREE_CODE (stmt) == FOR_STMT)
    genericize_for_stmt (stmt_p, walk_subtrees, data);
  else if (TREE_CODE (stmt) == WHILE_STMT)
    genericize_while_stmt (stmt_p, walk_subtrees, data);
  else if (TREE_CODE (stmt) == DO_STMT)
    genericize_do_stmt (stmt_p, walk_subtrees, data);
  else if (TREE_CODE (stmt) == SWITCH_STMT)
    genericize_switch_stmt (stmt_p, walk_subtrees, data);
  else if (TREE_CODE (stmt) == CONTINUE_STMT)
    genericize_continue_stmt (stmt_p);
  else if (TREE_CODE (stmt) == BREAK_STMT)
    genericize_break_stmt (stmt_p);
  else if (TREE_CODE (stmt) == OMP_FOR
	   || TREE_CODE (stmt) == OMP_SIMD
	   || TREE_CODE (stmt) == OMP_DISTRIBUTE)
    genericize_omp_for_stmt (stmt_p, walk_subtrees, data);
  else if (TREE_CODE (stmt) == SIZEOF_EXPR)
    {
      /* Fold a deferred sizeof/alignof now that its operand is known;
	 on error substitute 1 so downstream code sees a sane size.
	 Note the early return: the replacement is NOT entered in p_set.  */
      if (SIZEOF_EXPR_TYPE_P (stmt))
	*stmt_p = cxx_sizeof_or_alignof_type (TREE_TYPE (TREE_OPERAND (stmt,
								       0)),
					      SIZEOF_EXPR, false);
      else if (TYPE_P (TREE_OPERAND (stmt, 0)))
	*stmt_p = cxx_sizeof_or_alignof_type (TREE_OPERAND (stmt, 0),
					      SIZEOF_EXPR, false);
      else
	*stmt_p = cxx_sizeof_or_alignof_expr (TREE_OPERAND (stmt, 0),
					      SIZEOF_EXPR, false);
      if (*stmt_p == error_mark_node)
	*stmt_p = size_one_node;
      return NULL;
    }

  /* Remember the (possibly replaced) node so it is not walked twice.  */
  pointer_set_insert (p_set, *stmt_p);

  return NULL;
}