tree
expand_array_notation_exprs (tree t)
{
  enum tree_code code;
  bool is_expr;
  location_t loc = UNKNOWN_LOCATION;

  if (!t)
    return t;

  loc = EXPR_LOCATION (t);
  code = TREE_CODE (t);
  is_expr = IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code));
  switch (code)
    {
    case ERROR_MARK:
    case IDENTIFIER_NODE:
    case INTEGER_CST:
    case REAL_CST:
    case FIXED_CST:
    case STRING_CST:
    case BLOCK:
    case PLACEHOLDER_EXPR:
    case FIELD_DECL:
    case VOID_TYPE:
    case REAL_TYPE:
    case SSA_NAME:
    case LABEL_DECL:
    case RESULT_DECL:
    case VAR_DECL:
    case PARM_DECL:
    case NON_LVALUE_EXPR:
    case NOP_EXPR:
    case INIT_EXPR:
    case ADDR_EXPR:
    case ARRAY_REF:
    case BIT_FIELD_REF:
    case VECTOR_CST:
    case COMPLEX_CST:
      return t;
    case MODIFY_EXPR:
      if (contains_array_notation_expr (t))
	t = expand_an_in_modify_expr (loc, TREE_OPERAND (t, 0), NOP_EXPR,
				      TREE_OPERAND (t, 1),
				      tf_warning_or_error);
      return t;
    case MODOP_EXPR:
      if (contains_array_notation_expr (t) && !processing_template_decl)
	t = expand_an_in_modify_expr (loc, TREE_OPERAND (t, 0),
				      TREE_CODE (TREE_OPERAND (t, 1)),
				      TREE_OPERAND (t, 2),
				      tf_warning_or_error);
      return t;
    case CONSTRUCTOR:
      return t;
    case BIND_EXPR:
      {
	BIND_EXPR_BODY (t) = expand_array_notation_exprs (BIND_EXPR_BODY (t));
	return t;
      }
    case DECL_EXPR:
      {
	tree x = DECL_EXPR_DECL (t);
	if (t && TREE_CODE (x) != FUNCTION_DECL)
	  if (DECL_INITIAL (x))
	    t = expand_unary_array_notation_exprs (t);
	return t;
      }
    case STATEMENT_LIST:
      {
	tree_stmt_iterator i;
	for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i))
	  *tsi_stmt_ptr (i)
	    = expand_array_notation_exprs (*tsi_stmt_ptr (i));
	return t;
      }
    case OMP_PARALLEL:
    case OMP_TASK:
    case OMP_FOR:
    case OMP_SINGLE:
    case OMP_SECTION:
    case OMP_SECTIONS:
    case OMP_MASTER:
    case OMP_TASKGROUP:
    case OMP_ORDERED:
    case OMP_CRITICAL:
    case OMP_ATOMIC:
    case OMP_CLAUSE:
    case TARGET_EXPR:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case POINTER_TYPE:
    case ARRAY_TYPE:
    case RECORD_TYPE:
    case METHOD_TYPE:
      return t;
    case RETURN_EXPR:
      if (contains_array_notation_expr (t))
	t = expand_return_expr (t);
      return t;
    case PREDECREMENT_EXPR:
    case PREINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
    case AGGR_INIT_EXPR:
    case CALL_EXPR:
      t = expand_unary_array_notation_exprs (t);
      return t;
    case CONVERT_EXPR:
    case CLEANUP_POINT_EXPR:
    case EXPR_STMT:
      TREE_OPERAND (t, 0)
	= expand_array_notation_exprs (TREE_OPERAND (t, 0));
      /* It is not necessary to wrap error_mark_node in EXPR_STMT.  */
      if (TREE_OPERAND (t, 0) == error_mark_node)
	return TREE_OPERAND (t, 0);
      return t;
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_XOR_EXPR:
    case TRUTH_NOT_EXPR:
    case COND_EXPR:
      t = cp_expand_cond_array_notations (t);
      if (TREE_CODE (t) == COND_EXPR)
	{
	  COND_EXPR_THEN (t)
	    = expand_array_notation_exprs (COND_EXPR_THEN (t));
	  COND_EXPR_ELSE (t)
	    = expand_array_notation_exprs (COND_EXPR_ELSE (t));
	}
      return t;
    case FOR_STMT:
      if (contains_array_notation_expr (FOR_COND (t)))
	{
	  error_at (EXPR_LOCATION (FOR_COND (t)),
		    "array notation cannot be used in a condition for "
		    "a for-loop");
	  return error_mark_node;
	}
      /* FIXME: Add a check for CILK_FOR_STMT here when we add Cilk
	 tasking keywords.  */
      if (TREE_CODE (t) == FOR_STMT)
	{
	  FOR_BODY (t) = expand_array_notation_exprs (FOR_BODY (t));
	  FOR_EXPR (t) = expand_array_notation_exprs (FOR_EXPR (t));
	}
      else
	t = expand_array_notation_exprs (t);
      return t;
    case IF_STMT:
      t = cp_expand_cond_array_notations (t);
      /* If the above function added some extra instructions above the
	 original if statement, then we cannot assume it is still an
	 IF_STMT, so we have to check again.  */
      if (TREE_CODE (t) == IF_STMT)
	{
	  if (THEN_CLAUSE (t))
	    THEN_CLAUSE (t) = expand_array_notation_exprs (THEN_CLAUSE (t));
	  if (ELSE_CLAUSE (t))
	    ELSE_CLAUSE (t) = expand_array_notation_exprs (ELSE_CLAUSE (t));
	}
      else
	t = expand_array_notation_exprs (t);
      return t;
    case SWITCH_STMT:
      if (contains_array_notation_expr (SWITCH_STMT_COND (t)))
	{
	  error_at (EXPR_LOCATION (SWITCH_STMT_COND (t)),
		    "array notation cannot be used as a condition for "
		    "a switch statement");
	  return error_mark_node;
	}
      if (SWITCH_STMT_BODY (t))
	SWITCH_STMT_BODY (t)
	  = expand_array_notation_exprs (SWITCH_STMT_BODY (t));
      return t;
    case WHILE_STMT:
      if (contains_array_notation_expr (WHILE_COND (t)))
	{
	  if (EXPR_LOCATION (WHILE_COND (t)) != UNKNOWN_LOCATION)
	    loc = EXPR_LOCATION (WHILE_COND (t));
	  error_at (loc, "array notation cannot be used as a condition for "
		    "a while statement");
	  return error_mark_node;
	}
      if (WHILE_BODY (t))
	WHILE_BODY (t) = expand_array_notation_exprs (WHILE_BODY (t));
      return t;
    case DO_STMT:
      if (contains_array_notation_expr (DO_COND (t)))
	{
	  error_at (EXPR_LOCATION (DO_COND (t)),
		    "array notation cannot be used as a condition for a "
		    "do-while statement");
	  return error_mark_node;
	}
      if (DO_BODY (t))
	DO_BODY (t) = expand_array_notation_exprs (DO_BODY (t));
      return t;
    default:
      if (is_expr)
	{
	  int i, len;

	  /* Walk over all the sub-trees of this operand.  */
	  len = TREE_CODE_LENGTH (code);

	  /* Go through the subtrees.  We need to do this in forward order
	     so that the scope of a FOR_EXPR is handled properly.  */
	  for (i = 0; i < len; ++i)
	    TREE_OPERAND (t, i)
	      = expand_array_notation_exprs (TREE_OPERAND (t, i));
	}
      return t;
    }
  return t;
}
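
/* The walker above follows a common expansion pattern: dispatch on the
   node code, rewrite the handful of codes that need special treatment,
   and fall back to recursing over all operands in the default case.
   Below is a minimal standalone sketch of that pattern, using a
   hypothetical node type rather than GCC trees.  */

#include <stddef.h>

enum node_kind { NK_CONSTANT, NK_ASSIGN, NK_OTHER };

struct node
{
  enum node_kind kind;
  size_t nops;			/* number of operands in OPS */
  struct node *ops[4];
};

/* Hypothetical rewrite hook for assignments; a real expander would
   build the replacement subtree here.  */
static struct node *
expand_assign (struct node *n)
{
  return n;
}

static struct node *
expand_exprs (struct node *n)
{
  if (!n)
    return n;
  switch (n->kind)
    {
    case NK_CONSTANT:
      /* Leaves need no expansion.  */
      return n;
    case NK_ASSIGN:
      /* Codes with special semantics get their own expander.  */
      return expand_assign (n);
    default:
      /* Everything else: walk the operands in forward order, exactly
	 as the default case above does with TREE_OPERAND.  */
      for (size_t ii = 0; ii < n->nops; ii++)
	n->ops[ii] = expand_exprs (n->ops[ii]);
      return n;
    }
}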
static tree
expand_sec_reduce_builtin (tree an_builtin_fn, tree *new_var)
{
  tree new_var_type = NULL_TREE, func_parm, new_yes_expr, new_no_expr;
  tree array_ind_value = NULL_TREE, new_no_ind, new_yes_ind, new_no_list;
  tree new_yes_list, new_cond_expr, new_expr = NULL_TREE;
  vec<tree, va_gc> *array_list = NULL, *array_operand = NULL;
  size_t list_size = 0, rank = 0, ii = 0;
  tree body, an_init, loop_with_init = alloc_stmt_list ();
  tree array_op0, comp_node = NULL_TREE;
  tree call_fn = NULL_TREE, identity_value = NULL_TREE;
  tree init = NULL_TREE, cond_init = NULL_TREE;
  enum tree_code code = NOP_EXPR;
  location_t location = UNKNOWN_LOCATION;
  vec<vec<an_parts> > an_info = vNULL;
  vec<an_loop_parts> an_loop_info = vNULL;
  enum built_in_function an_type
    = is_cilkplus_reduce_builtin (CALL_EXPR_FN (an_builtin_fn));
  vec<tree, va_gc> *func_args;

  if (an_type == BUILT_IN_NONE)
    return NULL_TREE;

  if (an_type != BUILT_IN_CILKPLUS_SEC_REDUCE
      && an_type != BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING)
    func_parm = CALL_EXPR_ARG (an_builtin_fn, 0);
  else
    {
      call_fn = CALL_EXPR_ARG (an_builtin_fn, 2);

      /* We need to do this because we are "faking" the builtin function
	 types, so the compiler does a bunch of typecasts and this will
	 get rid of all that!  */
      STRIP_NOPS (call_fn);
      if (TREE_CODE (call_fn) != OVERLOAD
	  && TREE_CODE (call_fn) != FUNCTION_DECL)
	call_fn = TREE_OPERAND (call_fn, 0);
      identity_value = CALL_EXPR_ARG (an_builtin_fn, 0);
      func_parm = CALL_EXPR_ARG (an_builtin_fn, 1);
      STRIP_NOPS (identity_value);
    }
  STRIP_NOPS (func_parm);

  location = EXPR_LOCATION (an_builtin_fn);

  /* A note on using find_rank (): if find_rank returns false, then it
     must have already reported an error, so we just return an
     error_mark_node without emitting any further diagnostics.  */
  if (!find_rank (location, an_builtin_fn, an_builtin_fn, true, &rank))
    return error_mark_node;

  if (rank == 0)
    return an_builtin_fn;
  else if (rank > 1
	   && (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
	       || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND))
    {
      error_at (location, "__sec_reduce_min_ind or __sec_reduce_max_ind "
		"cannot have arrays with dimension greater than 1");
      return error_mark_node;
    }

  extract_array_notation_exprs (func_parm, true, &array_list);
  list_size = vec_safe_length (array_list);
  switch (an_type)
    {
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ADD:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUL:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN:
      new_var_type = TREE_TYPE ((*array_list)[0]);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_NONZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_NONZERO:
      new_var_type = boolean_type_node;
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND:
      new_var_type = size_type_node;
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE:
      if (call_fn && identity_value)
	new_var_type = TREE_TYPE ((*array_list)[0]);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING:
      new_var_type = NULL_TREE;
      break;
    default:
      gcc_unreachable ();
    }
  if (new_var_type && TREE_CODE (new_var_type) == ARRAY_TYPE)
    new_var_type = TREE_TYPE (new_var_type);
  an_loop_info.safe_grow_cleared (rank);

  an_init = push_stmt_list ();

  /* Assign the array notation components to variables so that they can
     satisfy the exec-once rule.  */
  for (ii = 0; ii < list_size; ii++)
    if (TREE_CODE ((*array_list)[ii]) == ARRAY_NOTATION_REF)
      {
	tree anode = (*array_list)[ii];
	make_triplet_val_inv (&ARRAY_NOTATION_START (anode));
	make_triplet_val_inv (&ARRAY_NOTATION_LENGTH (anode));
	make_triplet_val_inv (&ARRAY_NOTATION_STRIDE (anode));
      }
  cilkplus_extract_an_triplets (array_list, list_size, rank, &an_info);

  for (ii = 0; ii < rank; ii++)
    {
      tree typ = ptrdiff_type_node;

      /* We use get_temp_regvar here instead of create_temporary_var when
	 an_type is SEC_REDUCE_MAX/MIN_IND because array_ind_value depends
	 on this value being initialized to 0.  */
      if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
	  || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND)
	an_loop_info[ii].var = get_temp_regvar (typ, build_zero_cst (typ));
      else
	{
	  an_loop_info[ii].var = create_temporary_var (typ);
	  add_decl_expr (an_loop_info[ii].var);
	}
      an_loop_info[ii].ind_init
	= build_x_modify_expr (location, an_loop_info[ii].var, INIT_EXPR,
			       build_zero_cst (typ), tf_warning_or_error);
    }
  array_operand = create_array_refs (location, an_info, an_loop_info,
				     list_size, rank);
  replace_array_notations (&func_parm, true, array_list, array_operand);

  if (!TREE_TYPE (func_parm))
    TREE_TYPE (func_parm) = TREE_TYPE ((*array_list)[0]);

  create_cmp_incr (location, &an_loop_info, rank, an_info,
		   tf_warning_or_error);
  if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
      || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND)
    array_ind_value = get_temp_regvar (TREE_TYPE (func_parm), func_parm);

  array_op0 = (*array_operand)[0];
  switch (an_type)
    {
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ADD:
      code = PLUS_EXPR;
      init = build_zero_cst (new_var_type);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUL:
      code = MULT_EXPR;
      init = build_one_cst (new_var_type);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_NONZERO:
      code = (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO
	      ? EQ_EXPR : NE_EXPR);
      init = build_zero_cst (new_var_type);
      cond_init = build_one_cst (new_var_type);
      comp_node = build_zero_cst (TREE_TYPE (func_parm));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_NONZERO:
      code = (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO
	      ? NE_EXPR : EQ_EXPR);
      init = build_one_cst (new_var_type);
      cond_init = build_zero_cst (new_var_type);
      comp_node = build_zero_cst (TREE_TYPE (func_parm));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX:
      code = MAX_EXPR;
      init = (TYPE_MIN_VALUE (new_var_type)
	      ? TYPE_MIN_VALUE (new_var_type) : func_parm);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN:
      code = MIN_EXPR;
      init = (TYPE_MAX_VALUE (new_var_type)
	      ? TYPE_MAX_VALUE (new_var_type) : func_parm);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND:
      code = (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
	      ? LE_EXPR : GE_EXPR);
      init = an_loop_info[0].var;
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE:
      init = identity_value;
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING:
      init = NULL_TREE;
      break;
    default:
      gcc_unreachable ();
    }

  if (an_type != BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING)
    *new_var = get_temp_regvar (new_var_type, init);
  else
    *new_var = NULL_TREE;

  switch (an_type)
    {
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ADD:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUL:
      new_expr = build_x_modify_expr (location, *new_var, code, func_parm,
				      tf_warning_or_error);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_NONZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_NONZERO:
      /* In all these cases, assume the false case is true and, as soon
	 as we find a true case, set the true flag and latch it in.  */
      new_yes_expr = build_x_modify_expr (location, *new_var, NOP_EXPR,
					  cond_init, tf_warning_or_error);
      new_no_expr = build_x_modify_expr (location, *new_var, NOP_EXPR,
					 *new_var, tf_warning_or_error);
      new_cond_expr = build_x_binary_op
	(location, code, func_parm, TREE_CODE (func_parm), comp_node,
	 TREE_CODE (comp_node), NULL, tf_warning_or_error);
      new_expr = build_x_conditional_expr (location, new_cond_expr,
					   new_yes_expr, new_no_expr,
					   tf_warning_or_error);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN:
      new_cond_expr = build_x_binary_op
	(location, code, *new_var, TREE_CODE (*new_var), func_parm,
	 TREE_CODE (func_parm), NULL, tf_warning_or_error);
      new_expr = build_x_modify_expr (location, *new_var, NOP_EXPR,
				      func_parm, tf_warning_or_error);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND:
      new_yes_expr = build_x_modify_expr (location, array_ind_value,
					  NOP_EXPR, func_parm,
					  tf_warning_or_error);
      new_no_expr = build_x_modify_expr (location, array_ind_value,
					 NOP_EXPR, array_ind_value,
					 tf_warning_or_error);
      if (list_size > 1)
	new_yes_ind = build_x_modify_expr (location, *new_var, NOP_EXPR,
					   an_loop_info[0].var,
					   tf_warning_or_error);
      else
	new_yes_ind = build_x_modify_expr (location, *new_var, NOP_EXPR,
					   TREE_OPERAND (array_op0, 1),
					   tf_warning_or_error);
      new_no_ind = build_x_modify_expr (location, *new_var, NOP_EXPR,
					*new_var, tf_warning_or_error);
      new_yes_list = alloc_stmt_list ();
      append_to_statement_list (new_yes_ind, &new_yes_list);
      append_to_statement_list (new_yes_expr, &new_yes_list);

      new_no_list = alloc_stmt_list ();
      append_to_statement_list (new_no_ind, &new_no_list);
      append_to_statement_list (new_no_expr, &new_no_list);

      new_cond_expr = build_x_binary_op
	(location, code, array_ind_value, TREE_CODE (array_ind_value),
	 func_parm, TREE_CODE (func_parm), NULL, tf_warning_or_error);
      new_expr = build_x_conditional_expr (location, new_cond_expr,
					   new_yes_list, new_no_list,
					   tf_warning_or_error);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING:
      func_args = make_tree_vector ();
      if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE)
	vec_safe_push (func_args, *new_var);
      else
	vec_safe_push (func_args, identity_value);
      vec_safe_push (func_args, func_parm);

      new_expr = finish_call_expr (call_fn, &func_args, false, true,
				   tf_warning_or_error);
      if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE)
	new_expr = build_x_modify_expr (location, *new_var, NOP_EXPR,
					new_expr, tf_warning_or_error);
      release_tree_vector (func_args);
      break;
    default:
      gcc_unreachable ();
    }
  an_init = pop_stmt_list (an_init);
  append_to_statement_list (an_init, &loop_with_init);
  body = new_expr;

  for (ii = 0; ii < rank; ii++)
    {
      tree new_loop = push_stmt_list ();
      create_an_loop (an_loop_info[ii].ind_init, an_loop_info[ii].cmp,
		      an_loop_info[ii].incr, body);
      body = pop_stmt_list (new_loop);
    }
  append_to_statement_list (body, &loop_with_init);

  an_info.release ();
  an_loop_info.release ();

  return loop_with_init;
}
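
/* Conceptually, the builtin expansion above rewrites a reduction such as
   __sec_reduce_add (a[0:n]) into an accumulator initialized with the
   operation's identity value plus a loop that folds in every element.
   A hand-written equivalent of that lowering in plain C follows
   (illustration only, not the generated tree).  */

#include <stddef.h>

static int
sec_reduce_add_lowered (const int *a, size_t n)
{
  int acc = 0;			/* *new_var seeded with PLUS_EXPR's identity */
  for (size_t ii = 0; ii < n; ii++)
    acc = acc + a[ii];		/* new_expr, executed once per iteration */
  return acc;
}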
tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree orig_declv, tree initv, tree condv, tree incrv,
		  tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  if ((code == CILK_SIMD || code == CILK_FOR)
      && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
    fail = true;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      init = integer_zero_node;
	      fail = true;
	    }
	  DECL_INITIAL (decl) = NULL_TREE;

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
				    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
				    elocus, init, NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 0));
		}

	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      if (code != CILK_SIMD && code != CILK_FOR)
			cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else if (code != CILK_SIMD && code != CILK_FOR)
		    cond_ok = false;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;
      OMP_FOR_ORIG_DECLS (t) = orig_declv;

      SET_EXPR_LOCATION (t, locus);
      return t;
    }
}
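
/* The condition check above canonicalizes the controlling predicate so
   the iteration variable ends up on the left-hand side, swapping the
   comparison code when it was on the right (n > i becomes i < n).  A
   small standalone sketch of that swap on a toy comparison record
   (hypothetical types, not GCC trees):  */

enum cmp_code { CMP_LT, CMP_LE, CMP_GT, CMP_GE };

struct cmp
{
  enum cmp_code code;
  int *op0, *op1;
};

static enum cmp_code
swap_comparison (enum cmp_code code)
{
  switch (code)
    {
    case CMP_LT: return CMP_GT;
    case CMP_LE: return CMP_GE;
    case CMP_GT: return CMP_LT;
    case CMP_GE: return CMP_LE;
    }
  return code;
}

/* Rewrite "expr <op> decl" into the equivalent "decl <swapped-op> expr",
   mirroring the swap_tree_comparison call above.  */
static void
canonicalize_cond (struct cmp *cond, int *decl)
{
  if (cond->op1 == decl)
    {
      cond->code = swap_comparison (cond->code);
      cond->op1 = cond->op0;
      cond->op0 = decl;
    }
}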
void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add an implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
	cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  for (; clauses; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	case OMP_CLAUSE_DEFAULTMAP:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_NOWAIT:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  if (code != OMP_SIMD)
	    OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_ALIGNED:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_PRIORITY:
	  s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  break;
	/* Duplicate this to all of taskloop, distribute, for and simd.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      else
		{
		  /* This must be #pragma omp target simd.  */
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		}
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		   != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* The private clause is supported on all constructs; it is enough
	   to put it on the innermost one.  For #pragma omp {for,sections}
	   put it on parallel though, as that's what we did for
	   OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD:
	      s = C_OMP_CLAUSE_SPLIT_SIMD;
	      break;
	    case OMP_FOR:
	    case OMP_SECTIONS:
	    case OMP_PARALLEL:
	      s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      break;
	    case OMP_DISTRIBUTE:
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	      break;
	    case OMP_TEAMS:
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  break;
	/* The firstprivate clause is supported on all constructs but simd.
	   Put it on the outermost of those and duplicate it on teams and
	   parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
	    {
	      if (code == OMP_SIMD
		  && (mask & ((OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_THREADS)
			      | (OMP_CLAUSE_MASK_1
				 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
		{
		  /* This must be #pragma omp target simd.  */
		  s = C_OMP_CLAUSE_SPLIT_TARGET;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_FIRSTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
	      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c)
		    = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections}
		   or
		   #pragma omp target parallel.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
	    {
	      /* This must be one of
		 #pragma omp {,target }teams distribute
		 #pragma omp target teams
		 #pragma omp {,target }teams distribute simd.  */
	      gcc_assert (code == OMP_DISTRIBUTE
			  || code == OMP_TEAMS
			  || code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		   != 0)
	    {
	      /* This must be #pragma omp taskloop simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* The lastprivate clause is allowed on distribute, for, sections
	   and simd.  In parallel {for{, simd},sections} we actually want
	   to put it on parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_DISTRIBUTE)
	    {
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))
	      != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
	      cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
	    }
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* The shared and default clauses are allowed on parallel, teams
	   and taskloop.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
		{
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* The reduction clause is allowed on simd, for, parallel, sections
	   and teams.  Duplicate it on all of them, but omit it on for or
	   sections if parallel is present.  */
	case OMP_CLAUSE_REDUCTION:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if (code == OMP_SIMD)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c)
		    = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS || code == OMP_PARALLEL)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IF:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
		{
		  if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_PARALLEL)
		    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_TARGET)
		    s = C_OMP_CLAUSE_SPLIT_TARGET;
		  else if (OMP_CLAUSE_IF_MODIFIER (clauses) == ERROR_MARK)
		    {
		      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					    OMP_CLAUSE_IF);
		      OMP_CLAUSE_IF_MODIFIER (c)
			= OMP_CLAUSE_IF_MODIFIER (clauses);
		      OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		      OMP_CLAUSE_CHAIN (c)
			= cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
		      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
		      s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		    }
		  else
		    {
		      error_at (OMP_CLAUSE_LOCATION (clauses),
				"expected %<parallel%> or %<target%> %<if%> "
				"clause modifier");
		      continue;
		    }
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	/* The linear clause is allowed on simd and for.  Put it on the
	   innermost construct.  */
	case OMP_CLAUSE_LINEAR:
	  if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	default:
	  gcc_unreachable ();
	}
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }

  if (!flag_checking)
    return;

  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
  if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
	       | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
      && code != OMP_SECTIONS)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
  if (code != OMP_SIMD)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
}
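
/* The splitter above files each clause into a per-construct bucket,
   duplicating a clause when several constructs need their own copy.
   A stripped-down sketch of the bucket mechanics follows, with
   hypothetical clause records in place of GCC trees; the real code picks
   the bucket from the clause code and the construct mask.  */

#include <stddef.h>

enum split_bucket
{
  SPLIT_TARGET, SPLIT_TEAMS, SPLIT_DISTRIBUTE,
  SPLIT_PARALLEL, SPLIT_FOR, SPLIT_SIMD, SPLIT_COUNT
};

struct clause
{
  int code;
  struct clause *chain;
};

/* Prepend CL to bucket S, mirroring the OMP_CLAUSE_CHAIN updates.  */
static void
file_clause (struct clause *buckets[SPLIT_COUNT], struct clause *cl,
	     enum split_bucket s)
{
  cl->chain = buckets[s];
  buckets[s] = cl;
}

static void
split_clauses (struct clause *clauses, struct clause *buckets[SPLIT_COUNT])
{
  struct clause *next;

  for (int i = 0; i < SPLIT_COUNT; i++)
    buckets[i] = NULL;
  for (; clauses; clauses = next)
    {
      next = clauses->chain;
      /* For illustration every clause lands on the innermost bucket;
	 the function above instead switches on the clause code.  */
      file_clause (buckets, clauses, SPLIT_SIMD);
    }
}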
bool
find_rank (location_t loc, tree orig_expr, tree expr, bool ignore_builtin_fn,
	   size_t *rank)
{
  tree ii_tree;
  size_t ii = 0, current_rank = 0;

  if (TREE_CODE (expr) == ARRAY_NOTATION_REF)
    {
      ii_tree = expr;
      while (ii_tree)
	{
	  if (TREE_CODE (ii_tree) == ARRAY_NOTATION_REF)
	    {
	      current_rank++;
	      ii_tree = ARRAY_NOTATION_ARRAY (ii_tree);
	    }
	  else if (TREE_CODE (ii_tree) == ARRAY_REF)
	    ii_tree = TREE_OPERAND (ii_tree, 0);
	  else if (TREE_CODE (ii_tree) == PARM_DECL
		   || TREE_CODE (ii_tree) == VAR_DECL)
	    break;
	}
      if (*rank == 0)
	/* In this case, all the expressions this function has encountered
	   thus far have been scalars or expressions with zero rank.
	   Please see the header comment for examples of such
	   expressions.  */
	*rank = current_rank;
      else if (*rank != current_rank)
	{
	  /* In this case, find_rank is being recursed through a set of
	     expressions of the form A <OPERATION> B, where A and B both
	     have array notations in them and the rank of A is not equal
	     to the rank of B.  A simple example of such a case is:
	     X[:] + Y[:][:]  */
	  *rank = current_rank;
	  return false;
	}
    }
  else if (TREE_CODE (expr) == STATEMENT_LIST)
    {
      tree_stmt_iterator ii_tsi;
      for (ii_tsi = tsi_start (expr); !tsi_end_p (ii_tsi);
	   tsi_next (&ii_tsi))
	if (!find_rank (loc, orig_expr, *tsi_stmt_ptr (ii_tsi),
			ignore_builtin_fn, rank))
	  return false;
    }
  else
    {
      if (TREE_CODE (expr) == CALL_EXPR)
	{
	  tree func_name = CALL_EXPR_FN (expr);
	  tree prev_arg = NULL_TREE, arg;
	  call_expr_arg_iterator iter;
	  size_t prev_rank = 0;

	  if (TREE_CODE (func_name) == ADDR_EXPR)
	    if (!ignore_builtin_fn)
	      if (is_cilkplus_reduce_builtin (func_name))
		/* If it is a built-in function, then we know it returns a
		   scalar.  */
		return true;
	  if (!find_rank (loc, orig_expr, func_name, ignore_builtin_fn,
			  rank))
	    return false;
	  FOR_EACH_CALL_EXPR_ARG (arg, iter, expr)
	    {
	      if (!find_rank (loc, orig_expr, arg, ignore_builtin_fn, rank))
		{
		  if (prev_arg && EXPR_HAS_LOCATION (prev_arg)
		      && prev_rank != *rank)
		    error_at (EXPR_LOCATION (prev_arg),
			      "rank mismatch between %qE and %qE",
			      prev_arg, arg);
		  else if (prev_arg && prev_rank != *rank)
		    /* The original expression is printed here as a
		       "heads-up" to the programmer.  Since there is no
		       location information for the offending argument,
		       the error could be in some internally generated
		       code that is not visible to the programmer, and so
		       the correct fix may lie in the original
		       expression.  */
		    error_at (loc, "rank mismatch in expression %qE",
			      orig_expr);
		  return false;
		}
	      prev_arg = arg;
	      prev_rank = *rank;
	    }
	}
      else
	/* Otherwise let each operand of the expression contribute
	   its rank.  */
	for (ii = 0; ii < TREE_CODE_LENGTH (TREE_CODE (expr)); ii++)
	  if (TREE_OPERAND (expr, ii)
	      && !find_rank (loc, orig_expr, TREE_OPERAND (expr, ii),
			     ignore_builtin_fn, rank))
	    return false;
    }
  return true;
}
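
/* Worked examples of the rank computation above: A[:] has rank 1,
   A[:][:] has rank 2, and a scalar such as x has rank 0.  Mixing ranks,
   as in X[:] + Y[:][:], makes find_rank return false and triggers the
   rank-mismatch diagnostics.  */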
tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs, tree v,
		     tree lhs1, tree rhs1, bool swapped, bool seq_cst,
		     bool test)
{
  tree x, type, addr, pre = NULL_TREE;

  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex types are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And let's not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference
     it via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  if (!test)
    addr = save_expr (addr);
  if (!test
      && TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can
	 recognize it even after unsharing the function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (swapped)
    {
      rhs = build_binary_op (loc, opcode, rhs, lhs, 1);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1 && VAR_P (rhs1) && VAR_P (lhs) && rhs1 != lhs && !test)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different "
		       "variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different "
		       "variables for memory");
      return error_mark_node;
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (lhs))
	{
	  if (lhs1 != lhs && !test)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two "
			     "different variables for memory");
	      return error_mark_node;
	    }
	}
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != lhs)
	{
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      if (!test)
		x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
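
/* The capture semantics being checked above can be written directly with
   C11 atomics: "atomic capture" reads the old (or new) value of the
   location while the update happens atomically.  A reference model of
   the two capture flavors follows (illustration only, not what GCC
   emits).  */

#include <stdatomic.h>

/* { v = x; x += expr; }  -- corresponds to OMP_ATOMIC_CAPTURE_OLD.  */
static int
capture_old (atomic_int *x, int expr)
{
  return atomic_fetch_add (x, expr);	     /* v receives the old value */
}

/* { x += expr; v = x; }  -- corresponds to OMP_ATOMIC_CAPTURE_NEW.  */
static int
capture_new (atomic_int *x, int expr)
{
  return atomic_fetch_add (x, expr) + expr;  /* v receives the new value */
}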
int get_token() {
  while (1) {
    switch (_c) {
    case ' ':
    case '\t':
      getc();
      break;
    case '#':
      // Skip a comment running to the end of the line.
      do {
        getc();
      } while (_c != '\n' && _c != EOF);
      break;
    case EOF:
      return token_eof;
    case '>':
    case '\n': {
      int c = _c;
      getc();
      return c;
    }
    case '<': {
      getc();
      switch (_c) {
      case '/':
        getc();
        return token_block_end;
      case '!':
        getc();
        return token_include;
      default:
        return '<';
      }
    }
    case '\"':
      // Quoted string: collect characters until the closing quote,
      // allowing \\ and \" escapes.
      _t = _new<config_text>(_pool);
      _t->loc.start = getpos();
      getc();
      while (1) {
        switch (_c) {
        case EOF:
        case '\n':
          error_at(getpos(), "expected '\"'.");
          return fail;
        case '\\':
          getc();
          if (_c != '\\' && _c != '"') {
            error_at(getpos(), "expected \\\\ or \\\"");
            return fail;
          }
          _pool->grow((char)_c);
          getc();
          break;
        case '"':
          _pool->grow('\0');
          _t->_text = (const char*)_pool->finish();
          _t->loc.end = getpos();
          getc();
          return token_string;
        default:
          _pool->grow((char)_c);
          getc();
        }
      }
    default:
      // Bare word: runs until whitespace or a syntax character.
      _t = _new<config_text>(_pool);
      _t->loc.start = getpos();
      while (1) {
        switch (_c) {
        case EOF:
        case '#':
        case '\n':
        case '\t':
        case ' ':
        case '"':
        case '<':
        case '>':
          _pool->grow('\0');
          _t->_text = (const char*)_pool->finish();
          return token_string;
        default:
          _pool->grow((char)_c);
          _t->loc.end = getpos();
          getc();
        }
      }
      break;
    }
  }
}
void GCCPluginApi::error(const GenericTree& decl, const Message& message) const
{
  // Pass the message as an argument, not as the format string, so that
  // '%' characters in it cannot be misinterpreted as format directives.
  error_at(DECL_SOURCE_LOCATION(decl), "%s", (projectId + message).c_str());
}
static void
lto_varpool_replace_node (varpool_node *vnode,
			  varpool_node *prevailing_node)
{
  gcc_assert (!vnode->definition || prevailing_node->definition);
  gcc_assert (!vnode->analyzed || prevailing_node->analyzed);

  prevailing_node->clone_referring (vnode);
  if (vnode->force_output)
    prevailing_node->force_output = true;
  if (vnode->forced_by_abi)
    prevailing_node->forced_by_abi = true;

  /* Be sure we can garbage collect the initializer.  */
  if (DECL_INITIAL (vnode->decl)
      && vnode->decl != prevailing_node->decl)
    DECL_INITIAL (vnode->decl) = error_mark_node;

  /* Check and report ODR violations on virtual tables.  */
  if (DECL_VIRTUAL_P (vnode->decl) || DECL_VIRTUAL_P (prevailing_node->decl))
    compare_virtual_tables (prevailing_node, vnode);

  if (vnode->tls_model != prevailing_node->tls_model)
    {
      bool error = false;

      /* Non-TLS and TLS never mix together.  Also the emulated model is
	 not compatible with anything else.  */
      if (prevailing_node->tls_model == TLS_MODEL_NONE
	  || prevailing_node->tls_model == TLS_MODEL_EMULATED
	  || vnode->tls_model == TLS_MODEL_NONE
	  || vnode->tls_model == TLS_MODEL_EMULATED)
	error = true;
      /* The linker silently supports the transitions GD -> IE, GD -> LE,
	 LD -> LE, IE -> LE and LD -> IE.  Do the same transitions and
	 error out on others.  */
      else if ((prevailing_node->tls_model == TLS_MODEL_REAL
		|| prevailing_node->tls_model == TLS_MODEL_LOCAL_DYNAMIC)
	       && (vnode->tls_model == TLS_MODEL_INITIAL_EXEC
		   || vnode->tls_model == TLS_MODEL_LOCAL_EXEC))
	prevailing_node->tls_model = vnode->tls_model;
      else if ((vnode->tls_model == TLS_MODEL_REAL
		|| vnode->tls_model == TLS_MODEL_LOCAL_DYNAMIC)
	       && (prevailing_node->tls_model == TLS_MODEL_INITIAL_EXEC
		   || prevailing_node->tls_model == TLS_MODEL_LOCAL_EXEC))
	;
      else if (prevailing_node->tls_model == TLS_MODEL_INITIAL_EXEC
	       && vnode->tls_model == TLS_MODEL_LOCAL_EXEC)
	prevailing_node->tls_model = vnode->tls_model;
      else if (vnode->tls_model == TLS_MODEL_INITIAL_EXEC
	       && prevailing_node->tls_model == TLS_MODEL_LOCAL_EXEC)
	;
      else
	error = true;
      if (error)
	{
	  error_at (DECL_SOURCE_LOCATION (vnode->decl),
		    "%qD is defined with tls model %s", vnode->decl,
		    tls_model_names[vnode->tls_model]);
	  inform (DECL_SOURCE_LOCATION (prevailing_node->decl),
		  "previously defined here as %s",
		  tls_model_names[prevailing_node->tls_model]);
	}
    }
  /* Finally remove the replaced node.  */
  vnode->remove ();
}
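
/* A worked example of the TLS model merging above: if the prevailing
   definition uses the global-dynamic model (TLS_MODEL_REAL) and the
   replaced node was compiled for initial-exec, the prevailing node is
   tightened to initial-exec, matching what the linker would do.  The
   reverse direction (prevailing already initial-exec or local-exec, the
   replaced node global- or local-dynamic) needs no change.  Any mix
   involving TLS_MODEL_NONE or TLS_MODEL_EMULATED is diagnosed.  */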
static symtab_node *
lto_symtab_resolve_symbols (symtab_node *first)
{
  symtab_node *e;
  symtab_node *prevailing = NULL;

  /* Always set e->node so that edges are updated to reflect decl
     merging.  */
  for (e = first; e; e = e->next_sharing_asm_name)
    if (lto_symtab_symbol_p (e)
	&& (e->resolution == LDPR_PREVAILING_DEF_IRONLY
	    || e->resolution == LDPR_PREVAILING_DEF_IRONLY_EXP
	    || e->resolution == LDPR_PREVAILING_DEF))
      {
	prevailing = e;
	break;
      }

  /* If the chain is already resolved there is nothing else to do.  */
  if (prevailing)
    {
      /* Assert it's the only one.  */
      for (e = prevailing->next_sharing_asm_name; e;
	   e = e->next_sharing_asm_name)
	if (lto_symtab_symbol_p (e)
	    && (e->resolution == LDPR_PREVAILING_DEF_IRONLY
		|| e->resolution == LDPR_PREVAILING_DEF_IRONLY_EXP
		|| e->resolution == LDPR_PREVAILING_DEF))
	  fatal_error (input_location, "multiple prevailing defs for %qE",
		       DECL_NAME (prevailing->decl));
      return prevailing;
    }

  /* Find the single non-replaceable prevailing symbol and diagnose ODR
     violations.  */
  for (e = first; e; e = e->next_sharing_asm_name)
    {
      if (!lto_symtab_resolve_can_prevail_p (e))
	continue;

      /* If we have a non-replaceable definition it prevails.  */
      if (!lto_symtab_resolve_replaceable_p (e))
	{
	  if (prevailing)
	    {
	      error_at (DECL_SOURCE_LOCATION (e->decl),
			"%qD has already been defined", e->decl);
	      inform (DECL_SOURCE_LOCATION (prevailing->decl),
		      "previously defined here");
	    }
	  prevailing = e;
	}
    }
  if (prevailing)
    return prevailing;

  /* Do a second round choosing one from the replaceable prevailing
     decls.  */
  for (e = first; e; e = e->next_sharing_asm_name)
    {
      if (!lto_symtab_resolve_can_prevail_p (e))
	continue;

      /* Choose the first function that can prevail as prevailing.  */
      if (TREE_CODE (e->decl) == FUNCTION_DECL)
	{
	  prevailing = e;
	  break;
	}

      /* From the variables that can prevail choose the largest one.  */
      if (!prevailing
	  || tree_int_cst_lt (DECL_SIZE (prevailing->decl),
			      DECL_SIZE (e->decl))
	  /* When variables are equivalent try to choose one that has a
	     useful DECL_INITIAL.  This makes sense for keyed vtables
	     that are DECL_EXTERNAL but initialized.  In units that do
	     not need them we replace the initializer by error_mark_node
	     to conserve memory.

	     We know that the vtable is keyed outside the LTO unit -
	     otherwise the keyed instance would prevail.  We still can
	     preserve useful info in the initializer.  */
	  || (DECL_SIZE (prevailing->decl) == DECL_SIZE (e->decl)
	      && (DECL_INITIAL (e->decl)
		  && DECL_INITIAL (e->decl) != error_mark_node)
	      && (!DECL_INITIAL (prevailing->decl)
		  || DECL_INITIAL (prevailing->decl) == error_mark_node)))
	prevailing = e;
    }

  return prevailing;
}
static rtx
aarch64_simd_expand_args (rtx target, int icode, int have_retval,
			  tree exp, builtin_simd_arg *args)
{
  rtx pat;
  tree arg[SIMD_MAX_BUILTIN_ARGS];
  rtx op[SIMD_MAX_BUILTIN_ARGS];
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode[SIMD_MAX_BUILTIN_ARGS];
  int argc = 0;

  if (have_retval
      && (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode)))
    target = gen_reg_rtx (tmode);

  for (;;)
    {
      builtin_simd_arg thisarg = args[argc];

      if (thisarg == SIMD_ARG_STOP)
	break;
      else
	{
	  arg[argc] = CALL_EXPR_ARG (exp, argc);
	  op[argc] = expand_normal (arg[argc]);
	  mode[argc] = insn_data[icode].operand[argc + have_retval].mode;

	  switch (thisarg)
	    {
	    case SIMD_ARG_COPY_TO_REG:
	      if (POINTER_TYPE_P (TREE_TYPE (arg[argc])))
		op[argc] = convert_memory_address (Pmode, op[argc]);
	      /* gcc_assert (GET_MODE (op[argc]) == mode[argc]);  */
	      if (!(*insn_data[icode].operand[argc + have_retval].predicate)
		  (op[argc], mode[argc]))
		op[argc] = copy_to_mode_reg (mode[argc], op[argc]);
	      break;

	    case SIMD_ARG_LANE_INDEX:
	      /* Must be a previous operand into which this is an index.  */
	      gcc_assert (argc > 0);
	      if (CONST_INT_P (op[argc]))
		{
		  enum machine_mode vmode = mode[argc - 1];
		  aarch64_simd_lane_bounds (op[argc], 0,
					    GET_MODE_NUNITS (vmode), exp);
		  /* Keep to GCC-vector-extension lane indices in the
		     RTL.  */
		  op[argc] = GEN_INT (ENDIAN_LANE_N (vmode,
						     INTVAL (op[argc])));
		}
	      /* Fall through - if the lane index isn't a constant then
		 the next case will error.  */
	    case SIMD_ARG_CONSTANT:
	      if (!(*insn_data[icode].operand[argc + have_retval].predicate)
		  (op[argc], mode[argc]))
		{
		  error_at (EXPR_LOCATION (exp),
			    "incompatible type for argument %d, "
			    "expected %<const int%>", argc + 1);
		  return const0_rtx;
		}
	      break;

	    case SIMD_ARG_STOP:
	      gcc_unreachable ();
	    }

	  argc++;
	}
    }

  if (have_retval)
    switch (argc)
      {
      case 1:
	pat = GEN_FCN (icode) (target, op[0]);
	break;
      case 2:
	pat = GEN_FCN (icode) (target, op[0], op[1]);
	break;
      case 3:
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
	break;
      case 4:
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
	break;
      case 5:
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
	break;
      default:
	gcc_unreachable ();
      }
  else
    switch (argc)
      {
      case 1:
	pat = GEN_FCN (icode) (op[0]);
	break;
      case 2:
	pat = GEN_FCN (icode) (op[0], op[1]);
	break;
      case 3:
	pat = GEN_FCN (icode) (op[0], op[1], op[2]);
	break;
      case 4:
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
	break;
      case 5:
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
	break;
      default:
	gcc_unreachable ();
      }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);
  return target;
}
static void
riscv_parse_arch_string (const char *isa, int *flags, location_t loc)
{
  const char *p = isa;

  if (strncmp (p, "rv32", 4) == 0)
    *flags &= ~MASK_64BIT, p += 4;
  else if (strncmp (p, "rv64", 4) == 0)
    *flags |= MASK_64BIT, p += 4;
  else
    {
      error_at (loc, "-march=%s: ISA string must begin with rv32 or rv64",
		isa);
      return;
    }

  if (*p == 'g')
    {
      p++;

      *flags &= ~MASK_RVE;

      *flags |= MASK_MUL;
      *flags |= MASK_ATOMIC;
      *flags |= MASK_HARD_FLOAT;
      *flags |= MASK_DOUBLE_FLOAT;
    }
  else if (*p == 'i')
    {
      p++;

      *flags &= ~MASK_RVE;

      *flags &= ~MASK_MUL;
      if (*p == 'm')
	*flags |= MASK_MUL, p++;

      *flags &= ~MASK_ATOMIC;
      if (*p == 'a')
	*flags |= MASK_ATOMIC, p++;

      *flags &= ~(MASK_HARD_FLOAT | MASK_DOUBLE_FLOAT);
      if (*p == 'f')
	{
	  *flags |= MASK_HARD_FLOAT, p++;

	  if (*p == 'd')
	    {
	      *flags |= MASK_DOUBLE_FLOAT;
	      p++;
	    }
	}
    }
  else if (*p == 'e')
    {
      p++;

      *flags |= MASK_RVE;

      if (*flags & MASK_64BIT)
	{
	  error ("RV64E is not a valid base ISA");
	  return;
	}

      *flags &= ~MASK_MUL;
      if (*p == 'm')
	*flags |= MASK_MUL, p++;

      *flags &= ~MASK_ATOMIC;
      if (*p == 'a')
	*flags |= MASK_ATOMIC, p++;

      *flags &= ~(MASK_HARD_FLOAT | MASK_DOUBLE_FLOAT);
    }
  else
    {
      error_at (loc, "-march=%s: invalid ISA string", isa);
      return;
    }

  *flags &= ~MASK_RVC;
  if (*p == 'c')
    *flags |= MASK_RVC, p++;

  if (*p)
    {
      error_at (loc, "-march=%s: unsupported ISA substring %qs", isa, p);
      return;
    }
}
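
/* The parser accepts strings like "rv64gc": the base width, then either
   the 'g' shorthand (equivalent to "imafd") or an explicit 'i'/'e' base
   with optional 'm', 'a', 'f' and 'd' letters, then an optional 'c'.
   A few worked examples of the flag sets it computes:

     rv32i      ->  no extension masks set
     rv64imafd  ->  MASK_64BIT | MASK_MUL | MASK_ATOMIC
		    | MASK_HARD_FLOAT | MASK_DOUBLE_FLOAT
     rv64gc     ->  same as rv64imafd, plus MASK_RVC
     rv32ec     ->  MASK_RVE | MASK_RVC  */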
static tree
expand_an_in_modify_expr (location_t location, tree lhs,
                          enum tree_code modifycode, tree rhs,
                          tsubst_flags_t complain)
{
  tree array_expr_lhs = NULL_TREE, array_expr_rhs = NULL_TREE;
  tree array_expr = NULL_TREE;
  tree body = NULL_TREE;
  vec<tree> cond_expr = vNULL;
  vec<tree, va_gc> *lhs_array_operand = NULL, *rhs_array_operand = NULL;
  size_t lhs_rank = 0, rhs_rank = 0, ii = 0;
  vec<tree, va_gc> *rhs_list = NULL, *lhs_list = NULL;
  size_t rhs_list_size = 0, lhs_list_size = 0;
  tree new_modify_expr, new_var = NULL_TREE, builtin_loop, scalar_mods;
  bool found_builtin_fn = false;
  tree an_init, loop_with_init = alloc_stmt_list ();
  vec<vec<an_parts> > lhs_an_info = vNULL, rhs_an_info = vNULL;
  vec<an_loop_parts> lhs_an_loop_info = vNULL, rhs_an_loop_info = vNULL;

  if (!find_rank (location, rhs, rhs, false, &rhs_rank))
    return error_mark_node;

  extract_array_notation_exprs (rhs, false, &rhs_list);
  rhs_list_size = vec_safe_length (rhs_list);
  an_init = push_stmt_list ();
  if (rhs_rank)
    {
      scalar_mods = replace_invariant_exprs (&rhs);
      if (scalar_mods)
        finish_expr_stmt (scalar_mods);
    }
  for (ii = 0; ii < rhs_list_size; ii++)
    {
      tree rhs_node = (*rhs_list)[ii];
      if (TREE_CODE (rhs_node) == CALL_EXPR)
        {
          builtin_loop = expand_sec_reduce_builtin (rhs_node, &new_var);
          if (builtin_loop == error_mark_node)
            return error_mark_node;
          else if (builtin_loop)
            {
              finish_expr_stmt (builtin_loop);
              found_builtin_fn = true;
              if (new_var)
                {
                  vec<tree, va_gc> *rhs_sub_list = NULL, *new_var_list = NULL;
                  vec_safe_push (rhs_sub_list, rhs_node);
                  vec_safe_push (new_var_list, new_var);
                  replace_array_notations (&rhs, false, rhs_sub_list,
                                           new_var_list);
                }
            }
        }
    }
  lhs_rank = 0;
  rhs_rank = 0;
  if (!find_rank (location, lhs, lhs, true, &lhs_rank)
      || !find_rank (location, rhs, rhs, true, &rhs_rank))
    {
      pop_stmt_list (an_init);
      return error_mark_node;
    }

  /* If both sides are scalar, the only way we can get this far is if the
     expression contained array notation reduction built-ins.  Those have
     already been broken up above, so a simple build_x_modify_expr will
     do.  */
  if (lhs_rank == 0 && rhs_rank == 0)
    {
      if (found_builtin_fn)
        {
          new_modify_expr = build_x_modify_expr (location, lhs, modifycode,
                                                 rhs, complain);
          finish_expr_stmt (new_modify_expr);
          pop_stmt_list (an_init);
          return an_init;
        }
      else
        gcc_unreachable ();
    }

  /* If for some reason the location is not set, see whether the LHS or the
     RHS has location information.  If so, use that so we at least have an
     idea.  */
  if (location == UNKNOWN_LOCATION)
    {
      if (EXPR_LOCATION (lhs) != UNKNOWN_LOCATION)
        location = EXPR_LOCATION (lhs);
      else if (EXPR_LOCATION (rhs) != UNKNOWN_LOCATION)
        location = EXPR_LOCATION (rhs);
    }

  /* We need this when we have a scatter issue.  */
  extract_array_notation_exprs (lhs, true, &lhs_list);
  rhs_list = NULL;
  extract_array_notation_exprs (rhs, true, &rhs_list);
  rhs_list_size = vec_safe_length (rhs_list);
  lhs_list_size = vec_safe_length (lhs_list);

  if (lhs_rank == 0 && rhs_rank != 0)
    {
      error_at (location, "%qD cannot be scalar when %qD is not", lhs, rhs);
      return error_mark_node;
    }
  if (lhs_rank != 0 && rhs_rank != 0 && lhs_rank != rhs_rank)
    {
      error_at (location, "rank mismatch between %qE and %qE", lhs, rhs);
      return error_mark_node;
    }

  /* Assign the array notation components to variables so that they can
     satisfy the execute-once rule.  */
  for (ii = 0; ii < lhs_list_size; ii++)
    {
      tree anode = (*lhs_list)[ii];
      make_triplet_val_inv (&ARRAY_NOTATION_START (anode));
      make_triplet_val_inv (&ARRAY_NOTATION_LENGTH (anode));
      make_triplet_val_inv (&ARRAY_NOTATION_STRIDE (anode));
    }
  for (ii = 0; ii < rhs_list_size; ii++)
    if ((*rhs_list)[ii] && TREE_CODE ((*rhs_list)[ii]) == ARRAY_NOTATION_REF)
      {
        tree aa = (*rhs_list)[ii];
        make_triplet_val_inv (&ARRAY_NOTATION_START (aa));
        make_triplet_val_inv (&ARRAY_NOTATION_LENGTH (aa));
        make_triplet_val_inv (&ARRAY_NOTATION_STRIDE (aa));
      }
  lhs_an_loop_info.safe_grow_cleared (lhs_rank);
  if (rhs_rank)
    rhs_an_loop_info.safe_grow_cleared (rhs_rank);
  cond_expr.safe_grow_cleared (MAX (lhs_rank, rhs_rank));

  cilkplus_extract_an_triplets (lhs_list, lhs_list_size, lhs_rank,
                                &lhs_an_info);
  if (rhs_list)
    cilkplus_extract_an_triplets (rhs_list, rhs_list_size, rhs_rank,
                                  &rhs_an_info);
  if (length_mismatch_in_expr_p (EXPR_LOCATION (lhs), lhs_an_info)
      || (rhs_list
          && length_mismatch_in_expr_p (EXPR_LOCATION (rhs), rhs_an_info)))
    {
      pop_stmt_list (an_init);
      return error_mark_node;
    }
  tree rhs_len = ((rhs_list_size > 0 && rhs_rank > 0)
                  ? rhs_an_info[0][0].length : NULL_TREE);
  tree lhs_len = ((lhs_list_size > 0 && lhs_rank > 0)
                  ? lhs_an_info[0][0].length : NULL_TREE);
  if (lhs_list_size > 0 && rhs_list_size > 0 && lhs_rank > 0 && rhs_rank > 0
      && TREE_CODE (lhs_len) == INTEGER_CST && rhs_len
      && TREE_CODE (rhs_len) == INTEGER_CST
      && !tree_int_cst_equal (rhs_len, lhs_len))
    {
      error_at (location, "length mismatch between LHS and RHS");
      pop_stmt_list (an_init);
      return error_mark_node;
    }
  for (ii = 0; ii < lhs_rank; ii++)
    {
      tree typ = ptrdiff_type_node;
      lhs_an_loop_info[ii].var = create_temporary_var (typ);
      add_decl_expr (lhs_an_loop_info[ii].var);
      lhs_an_loop_info[ii].ind_init
        = build_x_modify_expr (location, lhs_an_loop_info[ii].var, INIT_EXPR,
                               build_zero_cst (typ), complain);
    }
  if (rhs_list_size > 0)
    {
      rhs_array_operand = fix_sec_implicit_args (location, rhs_list,
                                                 lhs_an_loop_info, lhs_rank,
                                                 lhs);
      if (!rhs_array_operand)
        return error_mark_node;
    }
  replace_array_notations (&rhs, true, rhs_list, rhs_array_operand);
  rhs_list_size = 0;
  rhs_list = NULL;
  extract_array_notation_exprs (rhs, true, &rhs_list);
  rhs_list_size = vec_safe_length (rhs_list);

  for (ii = 0; ii < rhs_rank; ii++)
    {
      tree typ = ptrdiff_type_node;
      rhs_an_loop_info[ii].var = create_temporary_var (typ);
      add_decl_expr (rhs_an_loop_info[ii].var);
      rhs_an_loop_info[ii].ind_init
        = build_x_modify_expr (location, rhs_an_loop_info[ii].var, INIT_EXPR,
                               build_zero_cst (typ), complain);
    }
  if (lhs_rank)
    {
      lhs_array_operand = create_array_refs (location, lhs_an_info,
                                             lhs_an_loop_info, lhs_list_size,
                                             lhs_rank);
      replace_array_notations (&lhs, true, lhs_list, lhs_array_operand);
    }
  if (rhs_array_operand)
    vec_safe_truncate (rhs_array_operand, 0);
  if (rhs_rank)
    {
      rhs_array_operand = create_array_refs (location, rhs_an_info,
                                             rhs_an_loop_info, rhs_list_size,
                                             rhs_rank);
      /* Replace all the array refs created by the above call, because this
         variable is blown away by the fix_sec_implicit_args call below.  */
      replace_array_notations (&rhs, true, rhs_list, rhs_array_operand);
      vec_safe_truncate (rhs_array_operand, 0);
      rhs_array_operand = fix_sec_implicit_args (location, rhs_list,
                                                 rhs_an_loop_info, rhs_rank,
                                                 rhs);
      if (!rhs_array_operand)
        return error_mark_node;
      replace_array_notations (&rhs, true, rhs_list, rhs_array_operand);
    }
  array_expr_rhs = rhs;
  array_expr_lhs = lhs;

  array_expr = build_x_modify_expr (location, array_expr_lhs, modifycode,
                                    array_expr_rhs, complain);
  create_cmp_incr (location, &lhs_an_loop_info, lhs_rank, lhs_an_info,
                   complain);
  if (rhs_rank)
    create_cmp_incr (location, &rhs_an_loop_info, rhs_rank, rhs_an_info,
                     complain);
  for (ii = 0; ii < MAX (rhs_rank, lhs_rank); ii++)
    if (ii < lhs_rank && ii < rhs_rank)
      cond_expr[ii]
        = build_x_binary_op (location, TRUTH_ANDIF_EXPR,
                             lhs_an_loop_info[ii].cmp,
                             TREE_CODE (lhs_an_loop_info[ii].cmp),
                             rhs_an_loop_info[ii].cmp,
                             TREE_CODE (rhs_an_loop_info[ii].cmp),
                             NULL, complain);
    else if (ii < lhs_rank && ii >= rhs_rank)
      cond_expr[ii] = lhs_an_loop_info[ii].cmp;
    else
      /* The case ii < rhs_rank && ii >= lhs_rank cannot occur: in a valid
         array notation expression the rank of the RHS cannot be greater
         than the rank of the LHS.  */
      gcc_unreachable ();

  an_init = pop_stmt_list (an_init);
  append_to_statement_list (an_init, &loop_with_init);
  body = array_expr;
  for (ii = 0; ii < MAX (lhs_rank, rhs_rank); ii++)
    {
      tree incr_list = alloc_stmt_list ();
      tree init_list = alloc_stmt_list ();
      tree new_loop = push_stmt_list ();
      if (lhs_rank)
        {
          append_to_statement_list (lhs_an_loop_info[ii].ind_init,
                                    &init_list);
          append_to_statement_list (lhs_an_loop_info[ii].incr, &incr_list);
        }
      if (rhs_rank)
        {
          append_to_statement_list (rhs_an_loop_info[ii].ind_init,
                                    &init_list);
          append_to_statement_list (rhs_an_loop_info[ii].incr, &incr_list);
        }
      create_an_loop (init_list, cond_expr[ii], incr_list, body);
      body = pop_stmt_list (new_loop);
    }
  append_to_statement_list (body, &loop_with_init);

  lhs_an_info.release ();
  lhs_an_loop_info.release ();
  if (rhs_rank)
    {
      rhs_an_info.release ();
      rhs_an_loop_info.release ();
    }
  cond_expr.release ();
  return loop_with_init;
}
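/* Illustrative sketch only (not part of the compiler): for a rank-1 array
   notation assignment such as

       A[0:n:s] = B[0:n:s] + c;

   the function above conceptually produces the loop below.  The name
   __idx is hypothetical; the real induction variables come from
   create_temporary_var, and the start/length/stride triplet values are
   first hoisted into invariants by make_triplet_val_inv so that each is
   evaluated exactly once.

       {
         ptrdiff_t __idx;
         for (__idx = 0; __idx < n; __idx++)
           A[0 + __idx * s] = B[0 + __idx * s] + c;
       }
*/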
int read_item()
{
    config_file *f;
    int tk;
    config_item *item;

    ll_failed_return(tk = get_token());
    switch (tk) {
    case token_eof:
        /* Finished one file; resume reading the including file, if any.  */
        f = _files.pop_front();
        f->close();
        if (!_files.empty()) {
            getc();
        }
        return ok;
    case '\n':
        return ok;
    case token_string: {
        /* Plain item: a name followed by zero or more values up to the
           end of the line.  */
        item = _new<config_item>(_pool, _cur);
        item->name = _t;
        while (1) {
            ll_failed_return(tk = get_token());
            switch (tk) {
            case EOF:
            case '\n':
                if (item->value_count) {
                    config_text *t;
                    while ((t = (config_text*)list_pop())) {
                        _pool->grow(t);
                    }
                    item->values = (config_text**)_pool->finish();
                } else {
                    item->values = nullptr;
                }
                push_item(item);
                return ok;
            case token_string:
                item->value_count++;
                list_push(_t);
                break;
            default:
                error_at(getpos(), "expected string, new line, or EOF.");
                return fail;
            }
        }
        break;
    }
    case '<': {
        /* Block item: '<' name values '>' opens a nested scope.  */
        ll_failed_return(tk = get_token());
        if (tk != token_string) {
            error_at(getpos(), "expected string.");
            return fail;
        }
        item = _new<config_item>(_pool, _cur);
        item->name = _t;
        while (1) {
            ll_failed_return(tk = get_token());
            switch (tk) {
            case '>':
                ll_failed_return(tk = get_token());
                if (tk != '\n' && tk != EOF) {
                    error_at(getpos(), "expected new line or EOF.");
                    return fail;
                }
                if (item->value_count) {
                    config_text **p, *t;
                    p = item->values = (config_text**)_pool->alloc(
                        sizeof(config_text*) * item->value_count);
                    while ((t = (config_text*)list_pop())) {
                        *p++ = t;
                    }
                } else {
                    item->values = nullptr;
                }
                push_item(item);
                _cur = item;
                return ok;
            case token_string:
                item->value_count++;
                list_push(_t);
                break;
            default:
                error_at(getpos(), "expected string or '>'.");
                return fail;
            }
        }
        break;
    }
    case token_block_end: {
        /* Close the current block; its name must match the open one.  */
        ll_failed_return(tk = get_token());
        if (tk != token_string) {
            error_at(getpos(), "expected string.");
            return fail;
        }
        if (_cur == nullptr
            || strcmp(*static_cast<config_item*>(_cur)->name, *_t) != 0) {
            error_at(getpos(), "unmatched item block.");
            return fail;
        }
        _cur = _cur->_parent;
        ll_failed_return(tk = get_token());
        if (tk != '>') {
            error_at(getpos(), "expected '>'.");
            return fail;
        }
        ll_failed_return(tk = get_token());
        if (tk != '\n' && tk != EOF) {
            error_at(getpos(), "expected new line or EOF.");
            return fail;
        }
        return ok;
    }
    case token_include: {
        ll_failed_return(tk = get_token());
        if (tk != token_string) {
            error_at(getpos(), "expected string.");
            return fail;
        }
        ll_failed_return(tk = get_token());
        if (tk != '>') {
            error_at(getpos(), "expected '>'.");
            return fail;
        }
        ll_failed_return(tk = get_token());
        if (tk != '\n' && tk != EOF) {
            error_at(getpos(), "expected new line or EOF.");
            return fail;
        }
        f = _new<config_file>(_pool);
        if (ll_failed(f->open(*_t))) {
            error_at(_t->loc.start, "open file '%s' failed.", _t->text());
            return fail;
        }
        /* Reject a file that is already on the include stack.  */
        for (auto& f2 : _files) {
            if ((f->stat.st_dev == f2.stat.st_dev)
                && (f->stat.st_ino == f2.stat.st_ino)) {
                error_at(_t->loc.start, "recursive include file '%s'.",
                         _t->text());
                return fail;
            }
        }
        _files.push_front(f);
        getc();
        return ok;
    }
    }
    return ok;
}
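/* Illustrative sketch only, inferred from the token handling above; the
   exact surface syntax is an assumption, not taken from a grammar spec:

       name value1 value2       -- token_string: item with values up to EOL
       <group a b               -- '<': item that opens a nested block
       child x y
       </group>                 -- token_block_end: must match the open name
       <include extra.conf>     -- token_include: reads the named file, with
                                   st_dev/st_ino guarding against recursion
*/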
static tree
cp_expand_cond_array_notations (tree orig_stmt)
{
  vec<tree, va_gc> *array_list = NULL, *array_operand = NULL;
  size_t list_size = 0;
  size_t rank = 0, ii = 0;
  tree an_init, body, stmt = NULL_TREE;
  tree builtin_loop, new_var = NULL_TREE;
  tree loop_with_init = alloc_stmt_list ();
  location_t location = UNKNOWN_LOCATION;
  vec<vec<an_parts> > an_info = vNULL;
  vec<an_loop_parts> an_loop_info = vNULL;

  if (TREE_CODE (orig_stmt) == COND_EXPR)
    {
      size_t cond_rank = 0, yes_rank = 0, no_rank = 0;
      tree yes_expr = COND_EXPR_THEN (orig_stmt);
      tree no_expr = COND_EXPR_ELSE (orig_stmt);
      tree cond = COND_EXPR_COND (orig_stmt);
      if (!find_rank (EXPR_LOCATION (cond), cond, cond, true, &cond_rank)
          || !find_rank (EXPR_LOCATION (yes_expr), yes_expr, yes_expr, true,
                         &yes_rank)
          || !find_rank (EXPR_LOCATION (no_expr), no_expr, no_expr, true,
                         &no_rank))
        return error_mark_node;

      /* If the condition has a zero rank, then handle array notations in
         the body separately.  */
      if (cond_rank == 0)
        return orig_stmt;
      if (cond_rank != yes_rank && yes_rank != 0)
        {
          error_at (EXPR_LOCATION (yes_expr), "rank mismatch with controlling"
                    " expression of parent if-statement");
          return error_mark_node;
        }
      else if (cond_rank != no_rank && no_rank != 0)
        {
          error_at (EXPR_LOCATION (no_expr), "rank mismatch with controlling "
                    "expression of parent if-statement");
          return error_mark_node;
        }
    }
  else if (TREE_CODE (orig_stmt) == IF_STMT)
    {
      size_t cond_rank = 0, yes_rank = 0, no_rank = 0;
      tree yes_expr = THEN_CLAUSE (orig_stmt);
      tree no_expr = ELSE_CLAUSE (orig_stmt);
      tree cond = IF_COND (orig_stmt);
      if (!find_rank (EXPR_LOCATION (cond), cond, cond, true, &cond_rank)
          || (yes_expr
              && !find_rank (EXPR_LOCATION (yes_expr), yes_expr, yes_expr,
                             true, &yes_rank))
          || (no_expr
              && !find_rank (EXPR_LOCATION (no_expr), no_expr, no_expr, true,
                             &no_rank)))
        return error_mark_node;

      /* Same reasoning as for COND_EXPR.  */
      if (cond_rank == 0)
        return orig_stmt;
      else if (cond_rank != yes_rank && yes_rank != 0)
        {
          error_at (EXPR_LOCATION (yes_expr), "rank mismatch with controlling"
                    " expression of parent if-statement");
          return error_mark_node;
        }
      else if (cond_rank != no_rank && no_rank != 0)
        {
          error_at (EXPR_LOCATION (no_expr), "rank mismatch with controlling "
                    "expression of parent if-statement");
          return error_mark_node;
        }
    }
  else if (truth_value_p (TREE_CODE (orig_stmt)))
    {
      size_t left_rank = 0, right_rank = 0;
      tree left_expr = TREE_OPERAND (orig_stmt, 0);
      tree right_expr = TREE_OPERAND (orig_stmt, 1);
      if (!find_rank (EXPR_LOCATION (left_expr), left_expr, left_expr, true,
                      &left_rank)
          || !find_rank (EXPR_LOCATION (right_expr), right_expr, right_expr,
                         true, &right_rank))
        return error_mark_node;
      if (right_rank == 0 && left_rank == 0)
        return orig_stmt;
    }

  if (!find_rank (EXPR_LOCATION (orig_stmt), orig_stmt, orig_stmt, true,
                  &rank))
    return error_mark_node;
  if (rank == 0)
    return orig_stmt;

  extract_array_notation_exprs (orig_stmt, false, &array_list);
  stmt = alloc_stmt_list ();
  for (ii = 0; ii < vec_safe_length (array_list); ii++)
    {
      tree array_node = (*array_list)[ii];
      if (TREE_CODE (array_node) == CALL_EXPR
          || TREE_CODE (array_node) == AGGR_INIT_EXPR)
        {
          builtin_loop = expand_sec_reduce_builtin (array_node, &new_var);
          if (builtin_loop == error_mark_node)
            finish_expr_stmt (error_mark_node);
          else if (new_var)
            {
              vec<tree, va_gc> *sub_list = NULL, *new_var_list = NULL;
              vec_safe_push (sub_list, array_node);
              vec_safe_push (new_var_list, new_var);
              replace_array_notations (&orig_stmt, false, sub_list,
                                       new_var_list);
              append_to_statement_list (builtin_loop, &stmt);
            }
        }
    }
  append_to_statement_list (orig_stmt, &stmt);
  rank = 0;
  array_list = NULL;
  if (!find_rank (EXPR_LOCATION (stmt), stmt, stmt, true, &rank))
    return error_mark_node;
  if (rank == 0)
    return stmt;

  extract_array_notation_exprs (stmt, true, &array_list);
  list_size = vec_safe_length (array_list);
  if (list_size == 0)
    return stmt;

  location = EXPR_LOCATION (orig_stmt);
  an_loop_info.safe_grow_cleared (rank);

  an_init = push_stmt_list ();

  /* Assign the array notation components to variables so that they can
     satisfy the execute-once rule.  */
  for (ii = 0; ii < list_size; ii++)
    {
      tree anode = (*array_list)[ii];
      make_triplet_val_inv (&ARRAY_NOTATION_START (anode));
      make_triplet_val_inv (&ARRAY_NOTATION_LENGTH (anode));
      make_triplet_val_inv (&ARRAY_NOTATION_STRIDE (anode));
    }
  cilkplus_extract_an_triplets (array_list, list_size, rank, &an_info);

  for (ii = 0; ii < rank; ii++)
    {
      tree typ = ptrdiff_type_node;
      an_loop_info[ii].var = create_temporary_var (typ);
      add_decl_expr (an_loop_info[ii].var);
      an_loop_info[ii].ind_init
        = build_x_modify_expr (location, an_loop_info[ii].var, INIT_EXPR,
                               build_zero_cst (typ), tf_warning_or_error);
    }
  array_operand = create_array_refs (location, an_info, an_loop_info,
                                     list_size, rank);
  replace_array_notations (&stmt, true, array_list, array_operand);
  create_cmp_incr (location, &an_loop_info, rank, an_info,
                   tf_warning_or_error);

  an_init = pop_stmt_list (an_init);
  append_to_statement_list (an_init, &loop_with_init);
  body = stmt;
  for (ii = 0; ii < rank; ii++)
    {
      tree new_loop = push_stmt_list ();
      create_an_loop (an_loop_info[ii].ind_init, an_loop_info[ii].cmp,
                      an_loop_info[ii].incr, body);
      body = pop_stmt_list (new_loop);
    }
  append_to_statement_list (body, &loop_with_init);
  an_info.release ();
  an_loop_info.release ();
  return loop_with_init;
}
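/* Illustrative sketch only: a statement such as

       if (A[0:n] > B[0:n])
         A[0:n] = 0;

   whose controlling expression has rank 1 is rewritten by the function
   above into roughly the loop below.  __idx is hypothetical; the real
   temporaries come from create_temporary_var.

       for (__idx = 0; __idx < n; __idx++)
         if (A[__idx] > B[__idx])
           A[__idx] = 0;
*/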
static bool
rs6000_handle_option (struct gcc_options *opts, struct gcc_options *opts_set,
                      const struct cl_decoded_option *decoded,
                      location_t loc)
{
  enum fpu_type_t fpu_type = FPU_NONE;
  char *p, *q;
  size_t code = decoded->opt_index;
  const char *arg = decoded->arg;
  int value = decoded->value;

  switch (code)
    {
    case OPT_mfull_toc:
      opts->x_rs6000_isa_flags &= ~OPTION_MASK_MINIMAL_TOC;
      opts->x_TARGET_NO_FP_IN_TOC = 0;
      opts->x_TARGET_NO_SUM_IN_TOC = 0;
      opts_set->x_rs6000_isa_flags |= OPTION_MASK_MINIMAL_TOC;
#ifdef TARGET_USES_SYSV4_OPT
      /* Note, V.4 no longer uses a normal TOC, so make -mfull-toc behave
         just the same as -mminimal-toc.  */
      opts->x_rs6000_isa_flags |= OPTION_MASK_MINIMAL_TOC;
      opts_set->x_rs6000_isa_flags |= OPTION_MASK_MINIMAL_TOC;
#endif
      break;

#ifdef TARGET_USES_SYSV4_OPT
    case OPT_mtoc:
      /* Make -mtoc behave like -mminimal-toc.  */
      opts->x_rs6000_isa_flags |= OPTION_MASK_MINIMAL_TOC;
      opts_set->x_rs6000_isa_flags |= OPTION_MASK_MINIMAL_TOC;
      break;
#endif

#ifdef TARGET_USES_AIX64_OPT
    case OPT_maix64:
#else
    case OPT_m64:
#endif
      opts->x_rs6000_isa_flags |= OPTION_MASK_POWERPC64;
      opts->x_rs6000_isa_flags |= (~opts_set->x_rs6000_isa_flags
                                   & OPTION_MASK_PPC_GFXOPT);
      opts_set->x_rs6000_isa_flags |= OPTION_MASK_POWERPC64;
      break;

#ifdef TARGET_USES_AIX64_OPT
    case OPT_maix32:
#else
    case OPT_m32:
#endif
      opts->x_rs6000_isa_flags &= ~OPTION_MASK_POWERPC64;
      opts_set->x_rs6000_isa_flags |= OPTION_MASK_POWERPC64;
      break;

    case OPT_mminimal_toc:
      if (value == 1)
        {
          opts->x_TARGET_NO_FP_IN_TOC = 0;
          opts->x_TARGET_NO_SUM_IN_TOC = 0;
        }
      break;

    case OPT_mpowerpc_gpopt:
    case OPT_mpowerpc_gfxopt:
      break;

    case OPT_mdebug_:
      p = ASTRDUP (arg);
      opts->x_rs6000_debug = 0;

      while ((q = strtok (p, ",")) != NULL)
        {
          unsigned mask = 0;
          bool invert;

          p = NULL;
          if (*q == '!')
            {
              invert = true;
              q++;
            }
          else
            invert = false;

          if (! strcmp (q, "all"))
            mask = MASK_DEBUG_ALL;
          else if (! strcmp (q, "stack"))
            mask = MASK_DEBUG_STACK;
          else if (! strcmp (q, "arg"))
            mask = MASK_DEBUG_ARG;
          else if (! strcmp (q, "reg"))
            mask = MASK_DEBUG_REG;
          else if (! strcmp (q, "addr"))
            mask = MASK_DEBUG_ADDR;
          else if (! strcmp (q, "cost"))
            mask = MASK_DEBUG_COST;
          else if (! strcmp (q, "target"))
            mask = MASK_DEBUG_TARGET;
          else if (! strcmp (q, "builtin"))
            mask = MASK_DEBUG_BUILTIN;
          else
            error_at (loc, "unknown -mdebug-%s switch", q);

          if (invert)
            opts->x_rs6000_debug &= ~mask;
          else
            opts->x_rs6000_debug |= mask;
        }
      break;

#ifdef TARGET_USES_SYSV4_OPT
    case OPT_mrelocatable:
      if (value == 1)
        {
          opts->x_rs6000_isa_flags |= OPTION_MASK_MINIMAL_TOC;
          opts_set->x_rs6000_isa_flags |= OPTION_MASK_MINIMAL_TOC;
          opts->x_TARGET_NO_FP_IN_TOC = 1;
        }
      break;

    case OPT_mrelocatable_lib:
      if (value == 1)
        {
          opts->x_rs6000_isa_flags |= (OPTION_MASK_RELOCATABLE
                                       | OPTION_MASK_MINIMAL_TOC);
          opts_set->x_rs6000_isa_flags |= (OPTION_MASK_RELOCATABLE
                                           | OPTION_MASK_MINIMAL_TOC);
          opts->x_TARGET_NO_FP_IN_TOC = 1;
        }
      else
        {
          opts->x_rs6000_isa_flags &= ~OPTION_MASK_RELOCATABLE;
          opts_set->x_rs6000_isa_flags |= OPTION_MASK_RELOCATABLE;
        }
      break;
#endif

    case OPT_mabi_altivec:
      /* Enabling the AltiVec ABI turns off the SPE ABI.  */
      opts->x_rs6000_spe_abi = 0;
      break;

    case OPT_mabi_spe:
      /* Enabling the SPE ABI turns off the AltiVec ABI.  */
      opts->x_rs6000_altivec_abi = 0;
      break;

    case OPT_mlong_double_:
      if (value != 64 && value != 128)
        {
          error_at (loc, "unknown switch -mlong-double-%s", arg);
          opts->x_rs6000_long_double_type_size
            = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
          return false;
        }
      break;

    case OPT_msingle_float:
      if (!TARGET_SINGLE_FPU)
        warning_at (loc, 0,
                    "-msingle-float option equivalent to -mhard-float");
      /* -msingle-float implies -mno-double-float and TARGET_HARD_FLOAT.  */
      opts->x_rs6000_double_float = 0;
      opts->x_rs6000_isa_flags &= ~OPTION_MASK_SOFT_FLOAT;
      opts_set->x_rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
      break;

    case OPT_mdouble_float:
      /* -mdouble-float implies -msingle-float and TARGET_HARD_FLOAT.  */
      opts->x_rs6000_single_float = 1;
      opts->x_rs6000_isa_flags &= ~OPTION_MASK_SOFT_FLOAT;
      opts_set->x_rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
      break;

    case OPT_msimple_fpu:
      if (!TARGET_SINGLE_FPU)
        warning_at (loc, 0, "-msimple-fpu option ignored");
      break;

    case OPT_mhard_float:
      /* -mhard_float implies -msingle-float and -mdouble-float.  */
      opts->x_rs6000_single_float = opts->x_rs6000_double_float = 1;
      break;

    case OPT_msoft_float:
      /* -msoft_float implies -mnosingle-float and -mnodouble-float.  */
      opts->x_rs6000_single_float = opts->x_rs6000_double_float = 0;
      break;

    case OPT_mfpu_:
      fpu_type = (enum fpu_type_t) value;
      if (fpu_type != FPU_NONE)
        {
          /* If -mfpu is not none, turn off SOFT_FLOAT and turn on
             HARD_FLOAT.  */
          opts->x_rs6000_isa_flags &= ~OPTION_MASK_SOFT_FLOAT;
          opts_set->x_rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
          opts->x_rs6000_xilinx_fpu = 1;
          if (fpu_type == FPU_SF_LITE || fpu_type == FPU_SF_FULL)
            opts->x_rs6000_single_float = 1;
          if (fpu_type == FPU_DF_LITE || fpu_type == FPU_DF_FULL)
            opts->x_rs6000_single_float = opts->x_rs6000_double_float = 1;
          if (fpu_type == FPU_SF_LITE || fpu_type == FPU_DF_LITE)
            opts->x_rs6000_simple_fpu = 1;
        }
      else
        {
          /* -mfpu=none is equivalent to -msoft-float.  */
          opts->x_rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
          opts_set->x_rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
          opts->x_rs6000_single_float = opts->x_rs6000_double_float = 0;
        }
      break;

    case OPT_mrecip:
      opts->x_rs6000_recip_name = (value) ? "default" : "none";
      break;
    }
  return true;
}
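/* Illustrative sketch only: given a hypothetical command line

       gcc -mdebug=stack,cost,!reg foo.c

   the OPT_mdebug_ case above tokenizes the argument on commas, ORs
   MASK_DEBUG_STACK and MASK_DEBUG_COST into opts->x_rs6000_debug, and
   the '!' prefix on "reg" clears MASK_DEBUG_REG instead of setting it.
   An unrecognized keyword produces the "unknown -mdebug-" diagnostic.  */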