static tree
vect_recog_temp_ssa_var (tree type, gimple stmt)
{
  tree var = create_tmp_var (type, "patt");

  add_referenced_var (var);
  var = make_ssa_name (var, stmt);
  return var;
}
/*
 * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
 * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
 */
static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
{
	gimple assign_intptr, assign_new_fptr, call_stmt;
	tree intptr, orptr, old_fptr, new_fptr, kernexec_mask;

	call_stmt = gsi_stmt(*gsi);
	old_fptr = gimple_call_fn(call_stmt);

	// create temporary unsigned long variable used for bitops and cast fptr to it
	intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
#if BUILDING_GCC_VERSION <= 4007
	add_referenced_var(intptr);
#endif
	intptr = make_ssa_name(intptr, NULL);
	assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
	SSA_NAME_DEF_STMT(intptr) = assign_intptr;
	gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
	update_stmt(assign_intptr);

	// apply logical or to temporary unsigned long and bitmask
	kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
//	kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
	orptr = fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask);
	intptr = make_ssa_name(SSA_NAME_VAR(intptr), NULL);
	assign_intptr = gimple_build_assign(intptr, orptr);
	SSA_NAME_DEF_STMT(intptr) = assign_intptr;
	gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
	update_stmt(assign_intptr);

	// cast temporary unsigned long back to a temporary fptr variable
	new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
#if BUILDING_GCC_VERSION <= 4007
	add_referenced_var(new_fptr);
#endif
	new_fptr = make_ssa_name(new_fptr, NULL);
	assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
	SSA_NAME_DEF_STMT(new_fptr) = assign_new_fptr;
	gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
	update_stmt(assign_new_fptr);

	// replace call stmt fn with the new fptr
	gimple_call_set_fn(call_stmt, new_fptr);
	update_stmt(call_stmt);
}
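/* Hedged illustration, not part of the plugin: on x86-64, forcing the MSB of
   a userland pointer yields a non-canonical address, so any dereference traps
   with a GPF, which is the property the instrumentation above relies on.
   The sample address below is illustrative only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uintptr_t user_ptr = 0x00007fffdeadbeefULL;	/* canonical user address */
	uintptr_t poisoned = user_ptr | 0x8000000000000000ULL;

	/* Canonical addresses need bits 63..47 all equal; 0x80007fff... has
	   bit 63 set but bits 62..48 clear, so a load through it faults. */
	printf("user:     %#lx\n", (unsigned long) user_ptr);
	printf("poisoned: %#lx\n", (unsigned long) poisoned);
	return 0;
}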
static void
mf_decl_cache_locals (void)
{
  tree t, shift_init_stmts, mask_init_stmts;
  tree_stmt_iterator tsi;

  /* Build the cache vars.  */
  mf_cache_shift_decl_l
    = mf_mark (create_tmp_var (TREE_TYPE (mf_cache_shift_decl),
                               "__mf_lookup_shift_l"));

  mf_cache_mask_decl_l
    = mf_mark (create_tmp_var (TREE_TYPE (mf_cache_mask_decl),
                               "__mf_lookup_mask_l"));

  /* Build initialization nodes for the cache vars.  We just load the
     globals into the cache variables.  */
  t = build2 (MODIFY_EXPR, TREE_TYPE (mf_cache_shift_decl_l),
              mf_cache_shift_decl_l, mf_cache_shift_decl);
  SET_EXPR_LOCATION (t, DECL_SOURCE_LOCATION (current_function_decl));
  gimplify_to_stmt_list (&t);
  shift_init_stmts = t;

  t = build2 (MODIFY_EXPR, TREE_TYPE (mf_cache_mask_decl_l),
              mf_cache_mask_decl_l, mf_cache_mask_decl);
  SET_EXPR_LOCATION (t, DECL_SOURCE_LOCATION (current_function_decl));
  gimplify_to_stmt_list (&t);
  mask_init_stmts = t;

  /* Anticipating multiple entry points, we insert the cache vars
     initializers in each successor of the ENTRY_BLOCK_PTR.  */
  for (tsi = tsi_start (shift_init_stmts); ! tsi_end_p (tsi); tsi_next (&tsi))
    insert_edge_copies (tsi_stmt (tsi), ENTRY_BLOCK_PTR);

  for (tsi = tsi_start (mask_init_stmts); ! tsi_end_p (tsi); tsi_next (&tsi))
    insert_edge_copies (tsi_stmt (tsi), ENTRY_BLOCK_PTR);

  bsi_commit_edge_inserts ();
}
tree
create_tmp_reg (tree type, const char *prefix)
{
  tree tmp;

  tmp = create_tmp_var (type, prefix);
  if (TREE_CODE (type) == COMPLEX_TYPE
      || TREE_CODE (type) == VECTOR_TYPE)
    DECL_GIMPLE_REG_P (tmp) = 1;

  return tmp;
}
static void
gen_one_condition (tree arg, int lbub,
                   enum tree_code tcode,
                   const char *temp_name1,
                   const char *temp_name2,
                   vec<gimple *> conds,
                   unsigned *nconds)
{
  tree lbub_real_cst, lbub_cst, float_type;
  tree temp, tempn, tempc, tempcn;
  gassign *stmt1;
  gassign *stmt2;
  gcond *stmt3;

  float_type = TREE_TYPE (arg);
  lbub_cst = build_int_cst (integer_type_node, lbub);
  lbub_real_cst = build_real_from_int_cst (float_type, lbub_cst);

  temp = create_tmp_var (float_type, temp_name1);
  stmt1 = gimple_build_assign (temp, arg);
  tempn = make_ssa_name (temp, stmt1);
  gimple_assign_set_lhs (stmt1, tempn);

  tempc = create_tmp_var (boolean_type_node, temp_name2);
  stmt2 = gimple_build_assign (tempc,
                               fold_build2 (tcode,
                                            boolean_type_node,
                                            tempn, lbub_real_cst));
  tempcn = make_ssa_name (tempc, stmt2);
  gimple_assign_set_lhs (stmt2, tempcn);

  stmt3 = gimple_build_cond_from_tree (tempcn, NULL_TREE, NULL_TREE);
  conds.quick_push (stmt1);
  conds.quick_push (stmt2);
  conds.quick_push (stmt3);
  (*nconds)++;
}
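/* Hedged sketch (illustrative names): gen_one_condition is a building block
   of conditional dead-call elimination, which guards a math call whose result
   is unused so the errno-setting library routine only runs on inputs that
   actually produce a domain error.  Roughly, at source level: */
#include <math.h>

void sqrt_result_unused (double x)
{
  /* before the pass: sqrt (x); -- result dead, call kept only for errno */
  if (x < 0.0)   /* guard comparison of the kind gen_one_condition builds */
    sqrt (x);    /* error domain: keep the call so errno still gets set */
}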
static tree
rewrite_reciprocal (block_stmt_iterator *bsi)
{
  tree stmt, lhs, rhs, stmt1, stmt2, var, name, tmp;
  tree real_one;

  stmt = bsi_stmt (*bsi);
  lhs = GENERIC_TREE_OPERAND (stmt, 0);
  rhs = GENERIC_TREE_OPERAND (stmt, 1);

  /* stmt must be GIMPLE_MODIFY_STMT.  */
  var = create_tmp_var (TREE_TYPE (rhs), "reciptmp");
  add_referenced_var (var);
  DECL_GIMPLE_REG_P (var) = 1;

  if (TREE_CODE (TREE_TYPE (rhs)) == VECTOR_TYPE)
    {
      int i, len;
      tree list = NULL_TREE;
      real_one = build_real (TREE_TYPE (TREE_TYPE (rhs)), dconst1);
      len = TYPE_VECTOR_SUBPARTS (TREE_TYPE (rhs));
      for (i = 0; i < len; i++)
        list = tree_cons (NULL, real_one, list);
      real_one = build_vector (TREE_TYPE (rhs), list);
    }
  else
    real_one = build_real (TREE_TYPE (rhs), dconst1);

  tmp = build2 (RDIV_EXPR, TREE_TYPE (rhs),
                real_one, TREE_OPERAND (rhs, 1));
  stmt1 = build_gimple_modify_stmt (var, tmp);
  name = make_ssa_name (var, stmt1);
  GIMPLE_STMT_OPERAND (stmt1, 0) = name;
  tmp = build2 (MULT_EXPR, TREE_TYPE (rhs),
                name, TREE_OPERAND (rhs, 0));
  stmt2 = build_gimple_modify_stmt (lhs, tmp);

  /* Replace division stmt with reciprocal and multiply stmts.
     The multiply stmt is not invariant, so update iterator
     and avoid rescanning.  */
  bsi_replace (bsi, stmt1, true);
  bsi_insert_after (bsi, stmt2, BSI_NEW_STMT);
  SSA_NAME_DEF_STMT (lhs) = stmt2;

  /* Continue processing with invariant reciprocal statement.  */
  return stmt1;
}
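/* Hedged illustration: at source level the rewrite turns a division by a
   loop-invariant denominator into one reciprocal (hoistable out of the loop)
   plus a multiply per iteration.  Names are illustrative. */
void recip_rewrite_demo (int n, double *out, const double *a, double b)
{
  double reciptmp = 1.0 / b;    /* stmt1: invariant, hoisted */
  for (int i = 0; i < n; i++)
    out[i] = a[i] * reciptmp;   /* stmt2: replaces a[i] / b */
}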
tree
ubsan_encode_value (tree t, bool in_expand_p)
{
  tree type = TREE_TYPE (t);
  const unsigned int bitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
  if (bitsize <= POINTER_SIZE)
    switch (TREE_CODE (type))
      {
      case BOOLEAN_TYPE:
      case ENUMERAL_TYPE:
      case INTEGER_TYPE:
        return fold_build1 (NOP_EXPR, pointer_sized_int_node, t);
      case REAL_TYPE:
        {
          tree itype = build_nonstandard_integer_type (bitsize, true);
          t = fold_build1 (VIEW_CONVERT_EXPR, itype, t);
          return fold_convert (pointer_sized_int_node, t);
        }
      default:
        gcc_unreachable ();
      }
  else
    {
      if (!DECL_P (t) || !TREE_ADDRESSABLE (t))
        {
          /* The reason for this is that we don't want to pessimize
             code by making vars unnecessarily addressable.  */
          tree var = create_tmp_var (type, NULL);
          tree tem = build2 (MODIFY_EXPR, void_type_node, var, t);
          if (in_expand_p)
            {
              rtx mem
                = assign_stack_temp_for_type (TYPE_MODE (type),
                                              GET_MODE_SIZE (TYPE_MODE (type)),
                                              type);
              SET_DECL_RTL (var, mem);
              expand_assignment (var, t, false);
              return build_fold_addr_expr (var);
            }
          t = build_fold_addr_expr (var);
          return build2 (COMPOUND_EXPR, TREE_TYPE (t), tem, t);
        }
      else
        return build_fold_addr_expr (t);
    }
}
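/* Hedged sketch of the encoding contract implemented above: anything that
   fits in a pointer-sized word travels to the ubsan runtime by value
   (integers widened, floats bit-cast), everything wider by address.  Helper
   names here are illustrative, not part of GCC. */
#include <stdint.h>

uintptr_t encode_small_int (int v)
{
  return (uintptr_t) v;                 /* NOP_EXPR to pointer_sized_int_node */
}

uintptr_t encode_small_float (float f)
{
  union { float f; uint32_t bits; } u = { f };
  return (uintptr_t) u.bits;            /* VIEW_CONVERT_EXPR, then widen */
}

const void *encode_wide (const long double *x)
{
  return x;                             /* too wide: pass the address */
}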
static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
{
	gimple asm_or_stmt, call_stmt;
	tree old_fptr, new_fptr, input, output;
#if BUILDING_GCC_VERSION <= 4007
	VEC(tree, gc) *inputs = NULL;
	VEC(tree, gc) *outputs = NULL;
#else
	vec<tree, va_gc> *inputs = NULL;
	vec<tree, va_gc> *outputs = NULL;
#endif

	call_stmt = gsi_stmt(*gsi);
	old_fptr = gimple_call_fn(call_stmt);

	// create temporary fptr variable
	new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
#if BUILDING_GCC_VERSION <= 4007
	add_referenced_var(new_fptr);
#endif
	new_fptr = make_ssa_name(new_fptr, NULL);

	// build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
	input = build_tree_list(NULL_TREE, build_string(2, "0"));
	input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
	output = build_tree_list(NULL_TREE, build_string(3, "=r"));
	output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
#if BUILDING_GCC_VERSION <= 4007
	VEC_safe_push(tree, gc, inputs, input);
	VEC_safe_push(tree, gc, outputs, output);
#else
	vec_safe_push(inputs, input);
	vec_safe_push(outputs, output);
#endif
	asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
	SSA_NAME_DEF_STMT(new_fptr) = asm_or_stmt;
	gimple_asm_set_volatile(asm_or_stmt, true);
	gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
	update_stmt(asm_or_stmt);

	// replace call stmt fn with the new fptr
	gimple_call_set_fn(call_stmt, new_fptr);
	update_stmt(call_stmt);
}
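/* Hedged illustration of the statement the plugin builds, written as ordinary
   GNU C inline asm (x86-64 only).  KERNEXEC keeps its OR mask live in %r10 at
   kernel entry; here we load r10 ourselves purely for demonstration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	register uint64_t mask asm("r10") = 0x8000000000000000ULL;
	uint64_t fptr = 0x00007fffdeadbeefULL;

	/* "0" is a matching constraint: the input shares operand 0's register,
	   mirroring the "=r"(new_fptr) : "0"(old_fptr) pairing above. */
	asm volatile("orq %%r10, %0\n\t" : "=r"(fptr) : "0"(fptr), "r"(mask));

	printf("%#lx\n", (unsigned long) fptr);	/* MSB now forced to 1 */
	return 0;
}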
static void
lower_builtin_posix_memalign (gimple_stmt_iterator *gsi)
{
  gimple stmt, call = gsi_stmt (*gsi);
  tree pptr = gimple_call_arg (call, 0);
  tree align = gimple_call_arg (call, 1);
  tree res = gimple_call_lhs (call);
  tree ptr = create_tmp_reg (ptr_type_node, NULL);
  if (TREE_CODE (pptr) == ADDR_EXPR)
    {
      tree tem = create_tmp_var (ptr_type_node, NULL);
      TREE_ADDRESSABLE (tem) = 1;
      gimple_call_set_arg (call, 0, build_fold_addr_expr (tem));
      stmt = gimple_build_assign (ptr, tem);
    }
  else
    stmt = gimple_build_assign (ptr,
                                fold_build2 (MEM_REF, ptr_type_node, pptr,
                                             build_int_cst (ptr_type_node, 0)));
  if (res == NULL_TREE)
    {
      res = create_tmp_reg (integer_type_node, NULL);
      gimple_call_set_lhs (call, res);
    }
  tree align_label = create_artificial_label (UNKNOWN_LOCATION);
  tree noalign_label = create_artificial_label (UNKNOWN_LOCATION);
  gimple cond = gimple_build_cond (EQ_EXPR, res, integer_zero_node,
                                   align_label, noalign_label);
  gsi_insert_after (gsi, cond, GSI_NEW_STMT);
  gsi_insert_after (gsi, gimple_build_label (align_label), GSI_NEW_STMT);
  gsi_insert_after (gsi, stmt, GSI_NEW_STMT);
  stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_ASSUME_ALIGNED),
                            2, ptr, align);
  gimple_call_set_lhs (stmt, ptr);
  gsi_insert_after (gsi, stmt, GSI_NEW_STMT);
  stmt = gimple_build_assign (fold_build2 (MEM_REF, ptr_type_node, pptr,
                                           build_int_cst (ptr_type_node, 0)),
                              ptr);
  gsi_insert_after (gsi, stmt, GSI_NEW_STMT);
  gsi_insert_after (gsi, gimple_build_label (noalign_label), GSI_NEW_STMT);
}
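/* Hedged source-level picture of the lowering above: after a successful
   posix_memalign, the stored pointer is laundered through
   __builtin_assume_aligned so later passes can rely on the alignment.
   Function name and the constant 64 are illustrative; the internal call
   built above passes the dynamic ALIGN argument. */
#include <stdlib.h>

void *posix_memalign_lowered (size_t size)
{
  void *p;
  int res = posix_memalign (&p, 64, size);
  if (res == 0)
    p = __builtin_assume_aligned (p, 64);
  return res == 0 ? p : NULL;
}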
/* Create a new global variable of type TYPE.  */

tree
add_new_static_var (tree type)
{
  tree new_decl;
  struct varpool_node *new_node;

  new_decl = create_tmp_var (type, NULL);
  DECL_NAME (new_decl) = create_tmp_var_name (NULL);
  TREE_READONLY (new_decl) = 0;
  TREE_STATIC (new_decl) = 1;
  TREE_USED (new_decl) = 1;
  DECL_CONTEXT (new_decl) = NULL_TREE;
  DECL_ABSTRACT (new_decl) = 0;
  lang_hooks.dup_lang_specific_decl (new_decl);
  create_var_ann (new_decl);
  new_node = varpool_node (new_decl);
  varpool_mark_needed_node (new_node);
  add_referenced_var (new_decl);
  varpool_finalize_decl (new_decl);

  return new_node->decl;
}
tree
ubsan_encode_value (tree t)
{
  tree type = TREE_TYPE (t);
  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
      if (TYPE_PRECISION (type) <= POINTER_SIZE)
        return fold_build1 (NOP_EXPR, pointer_sized_int_node, t);
      else
        return build_fold_addr_expr (t);
    case REAL_TYPE:
      {
        unsigned int bitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
        if (bitsize <= POINTER_SIZE)
          {
            tree itype = build_nonstandard_integer_type (bitsize, true);
            t = fold_build1 (VIEW_CONVERT_EXPR, itype, t);
            return fold_convert (pointer_sized_int_node, t);
          }
        else
          {
            if (!TREE_ADDRESSABLE (t))
              {
                /* The reason for this is that we don't want to pessimize
                   code by making vars unnecessarily addressable.  */
                tree var = create_tmp_var (TREE_TYPE (t), NULL);
                tree tem = build2 (MODIFY_EXPR, void_type_node, var, t);
                t = build_fold_addr_expr (var);
                return build2 (COMPOUND_EXPR, TREE_TYPE (t), tem, t);
              }
            else
              return build_fold_addr_expr (t);
          }
      }
    default:
      gcc_unreachable ();
    }
}
static tree
fix_array_notation_call_expr (tree arg)
{
  vec<tree, va_gc> *array_list = NULL, *array_operand = NULL;
  tree new_var = NULL_TREE;
  size_t list_size = 0, rank = 0, ii = 0;
  tree loop_init;
  tree body, loop_with_init = alloc_stmt_list ();
  location_t location = UNKNOWN_LOCATION;
  vec<vec<an_parts> > an_info = vNULL;
  vec<an_loop_parts> an_loop_info = vNULL;

  if (TREE_CODE (arg) == CALL_EXPR
      && is_cilkplus_reduce_builtin (CALL_EXPR_FN (arg)))
    {
      loop_init = fix_builtin_array_notation_fn (arg, &new_var);
      /* We are ignoring the new var because either the user does not want to
         capture it OR he is using sec_reduce_mutating function.  */
      return loop_init;
    }
  if (!find_rank (location, arg, arg, false, &rank))
    return error_mark_node;

  if (rank == 0)
    return arg;

  extract_array_notation_exprs (arg, true, &array_list);
  if (vec_safe_length (array_list) == 0)
    return arg;

  list_size = vec_safe_length (array_list);
  location = EXPR_LOCATION (arg);

  an_loop_info.safe_grow_cleared (rank);
  loop_init = push_stmt_list ();

  for (ii = 0; ii < list_size; ii++)
    if ((*array_list)[ii]
        && TREE_CODE ((*array_list)[ii]) == ARRAY_NOTATION_REF)
      {
        tree array_node = (*array_list)[ii];
        make_triplet_val_inv (location, &ARRAY_NOTATION_START (array_node));
        make_triplet_val_inv (location, &ARRAY_NOTATION_LENGTH (array_node));
        make_triplet_val_inv (location, &ARRAY_NOTATION_STRIDE (array_node));
      }
  cilkplus_extract_an_triplets (array_list, list_size, rank, &an_info);
  if (length_mismatch_in_expr_p (location, an_info))
    {
      pop_stmt_list (loop_init);
      return error_mark_node;
    }
  for (ii = 0; ii < rank; ii++)
    {
      an_loop_info[ii].var = create_tmp_var (integer_type_node);
      an_loop_info[ii].ind_init
        = build_modify_expr (location, an_loop_info[ii].var,
                             TREE_TYPE (an_loop_info[ii].var), NOP_EXPR,
                             location,
                             build_int_cst (TREE_TYPE (an_loop_info[ii].var),
                                            0),
                             TREE_TYPE (an_loop_info[ii].var));
    }
  array_operand = create_array_refs (location, an_info, an_loop_info,
                                     list_size, rank);
  replace_array_notations (&arg, true, array_list, array_operand);
  create_cmp_incr (location, &an_loop_info, rank, an_info);

  loop_init = pop_stmt_list (loop_init);
  append_to_statement_list_force (loop_init, &loop_with_init);
  body = arg;

  for (ii = 0; ii < rank; ii++)
    {
      tree new_loop = push_stmt_list ();
      add_stmt (an_loop_info[ii].ind_init);
      c_finish_loop (location, an_loop_info[ii].cmp, an_loop_info[ii].incr,
                     body, NULL_TREE, NULL_TREE, true);
      body = pop_stmt_list (new_loop);
    }
  append_to_statement_list_force (body, &loop_with_init);
  an_loop_info.release ();
  an_info.release ();

  return loop_with_init;
}
struct c_expr
fix_array_notation_expr (location_t location, enum tree_code code,
                         struct c_expr arg)
{
  vec<tree, va_gc> *array_list = NULL, *array_operand = NULL;
  size_t list_size = 0, rank = 0, ii = 0;
  tree loop_init;
  tree body, loop_with_init = alloc_stmt_list ();
  vec<vec<an_parts> > an_info = vNULL;
  vec<an_loop_parts> an_loop_info = vNULL;

  if (!find_rank (location, arg.value, arg.value, false, &rank))
    {
      /* If this function returns a NULL, we convert the tree value in the
         structure to error_mark_node and the parser should take care of the
         rest.  */
      arg.value = error_mark_node;
      return arg;
    }

  if (rank == 0)
    return arg;

  extract_array_notation_exprs (arg.value, true, &array_list);

  if (vec_safe_length (array_list) == 0)
    return arg;

  list_size = vec_safe_length (array_list);

  an_loop_info.safe_grow_cleared (rank);
  cilkplus_extract_an_triplets (array_list, list_size, rank, &an_info);

  loop_init = push_stmt_list ();
  for (ii = 0; ii < rank; ii++)
    {
      an_loop_info[ii].var = create_tmp_var (integer_type_node);
      an_loop_info[ii].ind_init
        = build_modify_expr (location, an_loop_info[ii].var,
                             TREE_TYPE (an_loop_info[ii].var), NOP_EXPR,
                             location,
                             build_int_cst (TREE_TYPE (an_loop_info[ii].var),
                                            0),
                             TREE_TYPE (an_loop_info[ii].var));
    }
  array_operand = create_array_refs (location, an_info, an_loop_info,
                                     list_size, rank);
  replace_array_notations (&arg.value, true, array_list, array_operand);
  create_cmp_incr (location, &an_loop_info, rank, an_info);

  arg = default_function_array_read_conversion (location, arg);
  if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
    arg.value = build_unary_op (location, code, arg.value, 0);
  else if (code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR)
    arg = parser_build_unary_op (location, code, arg);

  loop_init = pop_stmt_list (loop_init);
  append_to_statement_list_force (loop_init, &loop_with_init);
  body = arg.value;

  for (ii = 0; ii < rank; ii++)
    {
      tree new_loop = push_stmt_list ();
      add_stmt (an_loop_info[ii].ind_init);
      c_finish_loop (location, an_loop_info[ii].cmp, an_loop_info[ii].incr,
                     body, NULL_TREE, NULL_TREE, true);
      body = pop_stmt_list (new_loop);
    }
  append_to_statement_list_force (body, &loop_with_init);
  arg.value = loop_with_init;
  an_info.release ();
  an_loop_info.release ();
  return arg;
}
static tree
rewrite_bittest (block_stmt_iterator *bsi)
{
  tree stmt, lhs, rhs, var, name, use_stmt, stmt1, stmt2, t;
  use_operand_p use;

  stmt = bsi_stmt (*bsi);
  lhs = GENERIC_TREE_OPERAND (stmt, 0);
  rhs = GENERIC_TREE_OPERAND (stmt, 1);

  /* Verify that the single use of lhs is a comparison against zero.  */
  if (TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use, &use_stmt)
      || TREE_CODE (use_stmt) != COND_EXPR)
    return stmt;
  t = COND_EXPR_COND (use_stmt);
  if (TREE_OPERAND (t, 0) != lhs
      || (TREE_CODE (t) != NE_EXPR
          && TREE_CODE (t) != EQ_EXPR)
      || !integer_zerop (TREE_OPERAND (t, 1)))
    return stmt;

  /* Get at the operands of the shift.  The rhs is TMP1 & 1.  */
  stmt1 = SSA_NAME_DEF_STMT (TREE_OPERAND (rhs, 0));
  if (TREE_CODE (stmt1) != GIMPLE_MODIFY_STMT)
    return stmt;

  /* There is a conversion in between possibly inserted by fold.  */
  t = GIMPLE_STMT_OPERAND (stmt1, 1);
  if (TREE_CODE (t) == NOP_EXPR
      || TREE_CODE (t) == CONVERT_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      if (TREE_CODE (t) != SSA_NAME
          || !has_single_use (t))
        return stmt;
      stmt1 = SSA_NAME_DEF_STMT (t);
      if (TREE_CODE (stmt1) != GIMPLE_MODIFY_STMT)
        return stmt;
      t = GIMPLE_STMT_OPERAND (stmt1, 1);
    }

  /* Verify that B is loop invariant but A is not.  Verify that with
     all the stmt walking we are still in the same loop.  */
  if (TREE_CODE (t) == RSHIFT_EXPR
      && loop_containing_stmt (stmt1) == loop_containing_stmt (stmt)
      && outermost_invariant_loop_expr (TREE_OPERAND (t, 1),
                                        loop_containing_stmt (stmt1)) != NULL
      && outermost_invariant_loop_expr (TREE_OPERAND (t, 0),
                                        loop_containing_stmt (stmt1)) == NULL)
    {
      tree a = TREE_OPERAND (t, 0);
      tree b = TREE_OPERAND (t, 1);

      /* 1 << B */
      var = create_tmp_var (TREE_TYPE (a), "shifttmp");
      add_referenced_var (var);
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
                       build_int_cst (TREE_TYPE (a), 1), b);
      stmt1 = build_gimple_modify_stmt (var, t);
      name = make_ssa_name (var, stmt1);
      GIMPLE_STMT_OPERAND (stmt1, 0) = name;

      /* A & (1 << B) */
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
      stmt2 = build_gimple_modify_stmt (var, t);
      name = make_ssa_name (var, stmt2);
      GIMPLE_STMT_OPERAND (stmt2, 0) = name;

      /* Replace the SSA_NAME we compare against zero.  Adjust
         the type of zero accordingly.  */
      SET_USE (use, name);
      TREE_OPERAND (COND_EXPR_COND (use_stmt), 1)
        = build_int_cst_type (TREE_TYPE (name), 0);

      bsi_insert_before (bsi, stmt1, BSI_SAME_STMT);
      bsi_replace (bsi, stmt2, true);

      return stmt1;
    }
  return stmt;
}
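/* Hedged illustration of the bit-test rewrite: when the bit index B is loop
   invariant but A is not, (A >> B) & 1 becomes A & (1 << B), so the shift can
   be hoisted out of the loop.  Names are illustrative. */
int bittest_before (unsigned a, unsigned b)
{
  return ((a >> b) & 1) != 0;
}

int bittest_after (unsigned a, unsigned b)
{
  unsigned shifttmp = 1u << b;  /* invariant, hoisted when inside a loop */
  return (a & shifttmp) != 0;
}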
static tree
cxx_omp_clause_apply_fn (tree fn, tree arg1, tree arg2)
{
  tree defparm, parm;
  int i;

  if (fn == NULL)
    return NULL;

  defparm = TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn)));
  if (arg2)
    defparm = TREE_CHAIN (defparm);

  if (TREE_CODE (TREE_TYPE (arg1)) == ARRAY_TYPE)
    {
      tree inner_type = TREE_TYPE (arg1);
      tree start1, end1, p1;
      tree start2 = NULL, p2 = NULL;
      tree ret = NULL, lab, t;

      start1 = arg1;
      start2 = arg2;
      do
        {
          inner_type = TREE_TYPE (inner_type);
          start1 = build4 (ARRAY_REF, inner_type, start1,
                           size_zero_node, NULL, NULL);
          if (arg2)
            start2 = build4 (ARRAY_REF, inner_type, start2,
                             size_zero_node, NULL, NULL);
        }
      while (TREE_CODE (inner_type) == ARRAY_TYPE);
      start1 = build_fold_addr_expr (start1);
      if (arg2)
        start2 = build_fold_addr_expr (start2);

      end1 = TYPE_SIZE_UNIT (TREE_TYPE (arg1));
      end1 = fold_convert (TREE_TYPE (start1), end1);
      end1 = build2 (PLUS_EXPR, TREE_TYPE (start1), start1, end1);

      p1 = create_tmp_var (TREE_TYPE (start1), NULL);
      t = build2 (MODIFY_EXPR, void_type_node, p1, start1);
      append_to_statement_list (t, &ret);

      if (arg2)
        {
          p2 = create_tmp_var (TREE_TYPE (start2), NULL);
          t = build2 (MODIFY_EXPR, void_type_node, p2, start2);
          append_to_statement_list (t, &ret);
        }

      lab = create_artificial_label ();
      t = build1 (LABEL_EXPR, void_type_node, lab);
      append_to_statement_list (t, &ret);

      t = tree_cons (NULL, p1, NULL);
      if (arg2)
        t = tree_cons (NULL, p2, t);
      /* Handle default arguments.  */
      i = 1 + (arg2 != NULL);
      for (parm = defparm; parm != void_list_node; parm = TREE_CHAIN (parm))
        t = tree_cons (NULL, convert_default_arg (TREE_VALUE (parm),
                                                  TREE_PURPOSE (parm),
                                                  fn, i++), t);
      t = build_call (fn, nreverse (t));
      append_to_statement_list (t, &ret);

      t = fold_convert (TREE_TYPE (p1), TYPE_SIZE_UNIT (inner_type));
      t = build2 (PLUS_EXPR, TREE_TYPE (p1), p1, t);
      t = build2 (MODIFY_EXPR, void_type_node, p1, t);
      append_to_statement_list (t, &ret);

      if (arg2)
        {
          t = fold_convert (TREE_TYPE (p2), TYPE_SIZE_UNIT (inner_type));
          t = build2 (PLUS_EXPR, TREE_TYPE (p2), p2, t);
          t = build2 (MODIFY_EXPR, void_type_node, p2, t);
          append_to_statement_list (t, &ret);
        }

      t = build2 (NE_EXPR, boolean_type_node, p1, end1);
      t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&lab), NULL);
      append_to_statement_list (t, &ret);

      return ret;
    }
  else
    {
      tree t = tree_cons (NULL, build_fold_addr_expr (arg1), NULL);
      if (arg2)
        t = tree_cons (NULL, build_fold_addr_expr (arg2), t);
      /* Handle default arguments.  */
      i = 1 + (arg2 != NULL);
      for (parm = defparm; parm != void_list_node; parm = TREE_CHAIN (parm))
        t = tree_cons (NULL, convert_default_arg (TREE_VALUE (parm),
                                                  TREE_PURPOSE (parm),
                                                  fn, i++), t);
      return build_call (fn, nreverse (t));
    }
}
static bool
conditional_replacement (basic_block cond_bb, basic_block middle_bb,
                         edge e0, edge e1, gimple phi,
                         tree arg0, tree arg1)
{
  tree result;
  gimple stmt, new_stmt;
  tree cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  tree new_var, new_var2;

  /* FIXME: Gimplification of complex type is too hard for now.  */
  if (TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
      || TREE_CODE (TREE_TYPE (arg1)) == COMPLEX_TYPE)
    return false;

  /* The PHI arguments have the constants 0 and 1, then convert
     it to the conditional.  */
  if ((integer_zerop (arg0) && integer_onep (arg1))
      || (integer_zerop (arg1) && integer_onep (arg0)))
    ;
  else
    return false;

  if (!empty_block_p (middle_bb))
    return false;

  /* At this point we know we have a GIMPLE_COND with two successors.
     One successor is BB, the other successor is an empty block which
     falls through into BB.

     There is a single PHI node at the join point (BB) and its arguments
     are constants (0, 1).

     So, given the condition COND, and the two PHI arguments, we can
     rewrite this PHI into non-branching code:

       dest = (COND) or dest = COND'

     We use the condition as-is if the argument associated with the
     true edge has the value one or the argument associated with the
     false edge as the value zero.  Note that those conditions are not
     the same since only one of the outgoing edges from the GIMPLE_COND
     will directly reach BB and thus be associated with an argument.  */

  stmt = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* To handle special cases like floating point comparison, it is easier and
     less error-prone to build a tree and gimplify it on the fly though it is
     less efficient.  */
  cond = fold_build2 (gimple_cond_code (stmt), boolean_type_node,
                      gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));

  /* We need to know which is the true edge and which is the false
     edge so that we know when to invert the condition below.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
  if ((e0 == true_edge && integer_zerop (arg0))
      || (e0 == false_edge && integer_onep (arg0))
      || (e1 == true_edge && integer_zerop (arg1))
      || (e1 == false_edge && integer_onep (arg1)))
    cond = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);

  /* Insert our new statements at the end of conditional block before the
     COND_STMT.  */
  gsi = gsi_for_stmt (stmt);
  new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
                                      GSI_SAME_STMT);

  if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
    {
      new_var2 = create_tmp_var (TREE_TYPE (result), NULL);
      add_referenced_var (new_var2);
      new_stmt = gimple_build_assign_with_ops (CONVERT_EXPR, new_var2,
                                               new_var, NULL);
      new_var2 = make_ssa_name (new_var2, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_var2);
      gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
      new_var = new_var2;
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);

  /* Note that we optimized this PHI.  */
  return true;
}
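/* Hedged illustration of the PHI shape this phiopt transform targets and the
   straight-line code it produces.  Names are illustrative. */
int phiopt_before (int a, int b)
{
  int r;
  if (a < b)   /* the GIMPLE_COND in cond_bb */
    r = 1;     /* PHI argument on one edge */
  else
    r = 0;     /* PHI argument on the other edge */
  return r;    /* PHI result */
}

int phiopt_after (int a, int b)
{
  return a < b;   /* dest = COND, no branch */
}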
static tree
fix_conditional_array_notations_1 (tree stmt)
{
  vec<tree, va_gc> *array_list = NULL, *array_operand = NULL;
  size_t list_size = 0;
  tree cond = NULL_TREE, builtin_loop = NULL_TREE, new_var = NULL_TREE;
  size_t rank = 0, ii = 0;
  tree loop_init;
  location_t location = EXPR_LOCATION (stmt);
  tree body = NULL_TREE, loop_with_init = alloc_stmt_list ();
  vec<vec<an_parts> > an_info = vNULL;
  vec<an_loop_parts> an_loop_info = vNULL;

  if (TREE_CODE (stmt) == COND_EXPR)
    cond = COND_EXPR_COND (stmt);
  else if (TREE_CODE (stmt) == SWITCH_EXPR)
    cond = SWITCH_COND (stmt);
  else if (truth_value_p (TREE_CODE (stmt)))
    cond = TREE_OPERAND (stmt, 0);
  else
    /* Otherwise don't even touch the statement.  */
    return stmt;

  if (!find_rank (location, cond, cond, false, &rank))
    return error_mark_node;

  extract_array_notation_exprs (stmt, false, &array_list);
  loop_init = push_stmt_list ();
  for (ii = 0; ii < vec_safe_length (array_list); ii++)
    {
      tree array_node = (*array_list)[ii];
      if (TREE_CODE (array_node) == CALL_EXPR)
        {
          builtin_loop = fix_builtin_array_notation_fn (array_node, &new_var);
          if (builtin_loop == error_mark_node)
            {
              add_stmt (error_mark_node);
              pop_stmt_list (loop_init);
              return loop_init;
            }
          else if (builtin_loop)
            {
              vec<tree, va_gc> *sub_list = NULL, *new_var_list = NULL;
              vec_safe_push (sub_list, array_node);
              vec_safe_push (new_var_list, new_var);
              add_stmt (builtin_loop);
              replace_array_notations (&stmt, false, sub_list, new_var_list);
            }
        }
    }
  if (!find_rank (location, stmt, stmt, true, &rank))
    {
      pop_stmt_list (loop_init);
      return error_mark_node;
    }
  if (rank == 0)
    {
      add_stmt (stmt);
      pop_stmt_list (loop_init);
      return loop_init;
    }
  extract_array_notation_exprs (stmt, true, &array_list);

  if (vec_safe_length (array_list) == 0)
    return stmt;

  list_size = vec_safe_length (array_list);

  an_loop_info.safe_grow_cleared (rank);

  for (ii = 0; ii < list_size; ii++)
    if ((*array_list)[ii]
        && TREE_CODE ((*array_list)[ii]) == ARRAY_NOTATION_REF)
      {
        tree array_node = (*array_list)[ii];
        make_triplet_val_inv (location, &ARRAY_NOTATION_START (array_node));
        make_triplet_val_inv (location, &ARRAY_NOTATION_LENGTH (array_node));
        make_triplet_val_inv (location, &ARRAY_NOTATION_STRIDE (array_node));
      }
  cilkplus_extract_an_triplets (array_list, list_size, rank, &an_info);
  for (ii = 0; ii < rank; ii++)
    {
      an_loop_info[ii].var = create_tmp_var (integer_type_node);
      an_loop_info[ii].ind_init
        = build_modify_expr (location, an_loop_info[ii].var,
                             TREE_TYPE (an_loop_info[ii].var), NOP_EXPR,
                             location,
                             build_int_cst (TREE_TYPE (an_loop_info[ii].var),
                                            0),
                             TREE_TYPE (an_loop_info[ii].var));
    }
  array_operand = create_array_refs (location, an_info, an_loop_info,
                                     list_size, rank);
  replace_array_notations (&stmt, true, array_list, array_operand);
  create_cmp_incr (location, &an_loop_info, rank, an_info);
  loop_init = pop_stmt_list (loop_init);
  body = stmt;
  append_to_statement_list_force (loop_init, &loop_with_init);

  for (ii = 0; ii < rank; ii++)
    {
      tree new_loop = push_stmt_list ();
      add_stmt (an_loop_info[ii].ind_init);
      c_finish_loop (location, an_loop_info[ii].cmp, an_loop_info[ii].incr,
                     body, NULL_TREE, NULL_TREE, true);
      body = pop_stmt_list (new_loop);
    }
  append_to_statement_list_force (body, &loop_with_init);
  an_loop_info.release ();
  an_info.release ();
  return loop_with_init;
}
static void
gen_one_condition (tree arg, int lbub,
                   enum tree_code tcode,
                   const char *temp_name1,
                   const char *temp_name2,
                   VEC (gimple, heap) *conds,
                   unsigned *nconds)
{
  tree lbub_real_cst, lbub_cst, float_type;
  tree temp, tempn, tempc, tempcn;
  gimple stmt1, stmt2, stmt3;

  float_type = TREE_TYPE (arg);
  lbub_cst = build_int_cst (integer_type_node, lbub);
  lbub_real_cst = build_real_from_int_cst (float_type, lbub_cst);

  temp = create_tmp_var (float_type, temp_name1);
  stmt1 = gimple_build_assign (temp, arg);
  tempn = make_ssa_name (temp, stmt1);
  gimple_assign_set_lhs (stmt1, tempn);

  tempc = create_tmp_var (boolean_type_node, temp_name2);
  stmt2 = gimple_build_assign (tempc,
                               fold_build2 (tcode,
                                            boolean_type_node,
                                            tempn, lbub_real_cst));
  tempcn = make_ssa_name (tempc, stmt2);
  gimple_assign_set_lhs (stmt2, tempcn);

  stmt3 = gimple_build_cond_from_tree (tempcn, NULL_TREE, NULL_TREE);
  VEC_quick_push (gimple, conds, stmt1);
  VEC_quick_push (gimple, conds, stmt2);
  VEC_quick_push (gimple, conds, stmt3);
  (*nconds)++;
}
			if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
				continue;
			latent_entropy_decl = var;
//			debug_tree(var);
			break;
		}
		if (!latent_entropy_decl) {
//			debug_tree(current_function_decl);
			return 0;
		}
	}

//	fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));

	// 1. create local entropy variable
	local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
	add_referenced_var(local_entropy);
	mark_sym_for_renaming(local_entropy);

	// 2. initialize local entropy variable
	bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
	if (dom_info_available_p(CDI_DOMINATORS))
		set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
	gsi = gsi_start_bb(bb);

	assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
//	gimple_set_location(assign, loc);
	gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
	update_stmt(assign);
//	debug_bb(bb);

	gcc_assert(single_succ_p(bb));
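/* Hedged sketch of what the instrumentation above amounts to at source level:
   each covered function gets a local seeded with a compile-time random
   constant, which later steps mix and fold back into the global pool.  The
   constant and the final mixing step shown here are illustrative. */
extern unsigned long long latent_entropy;	/* kernel-provided global pool */

void instrumented_function(void)
{
	unsigned long long local_entropy = 0x8c17f1a64d3f0e2bULL; /* get_random_const() */

	/* ... per-basic-block mixing statements are inserted here ... */

	latent_entropy ^= local_entropy;	/* folded back before return */
}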
static tree
cxx_omp_clause_apply_fn (tree fn, tree arg1, tree arg2)
{
  tree defparm, parm, t;
  int i = 0;
  int nargs;
  tree *argarray;

  if (fn == NULL)
    return NULL;

  nargs = list_length (DECL_ARGUMENTS (fn));
  argarray = (tree *) alloca (nargs * sizeof (tree));
  defparm = TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn)));
  if (arg2)
    defparm = TREE_CHAIN (defparm);

  if (TREE_CODE (TREE_TYPE (arg1)) == ARRAY_TYPE)
    {
      tree inner_type = TREE_TYPE (arg1);
      tree start1, end1, p1;
      tree start2 = NULL, p2 = NULL;
      tree ret = NULL, lab;

      start1 = arg1;
      start2 = arg2;
      do
        {
          inner_type = TREE_TYPE (inner_type);
          start1 = build4 (ARRAY_REF, inner_type, start1,
                           size_zero_node, NULL, NULL);
          if (arg2)
            start2 = build4 (ARRAY_REF, inner_type, start2,
                             size_zero_node, NULL, NULL);
        }
      while (TREE_CODE (inner_type) == ARRAY_TYPE);
      start1 = build_fold_addr_expr (start1);
      if (arg2)
        start2 = build_fold_addr_expr (start2);

      end1 = TYPE_SIZE_UNIT (TREE_TYPE (arg1));
      end1 = build2 (POINTER_PLUS_EXPR, TREE_TYPE (start1), start1, end1);

      p1 = create_tmp_var (TREE_TYPE (start1), NULL);
      t = build2 (MODIFY_EXPR, TREE_TYPE (p1), p1, start1);
      append_to_statement_list (t, &ret);

      if (arg2)
        {
          p2 = create_tmp_var (TREE_TYPE (start2), NULL);
          t = build2 (MODIFY_EXPR, TREE_TYPE (p2), p2, start2);
          append_to_statement_list (t, &ret);
        }

      lab = create_artificial_label ();
      t = build1 (LABEL_EXPR, void_type_node, lab);
      append_to_statement_list (t, &ret);

      argarray[i++] = p1;
      if (arg2)
        argarray[i++] = p2;
      /* Handle default arguments.  */
      for (parm = defparm; parm && parm != void_list_node;
           parm = TREE_CHAIN (parm), i++)
        argarray[i] = convert_default_arg (TREE_VALUE (parm),
                                           TREE_PURPOSE (parm), fn, i);
      t = build_call_a (fn, i, argarray);
      t = fold_convert (void_type_node, t);
      t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
      append_to_statement_list (t, &ret);

      t = TYPE_SIZE_UNIT (inner_type);
      t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (p1), p1, t);
      t = build2 (MODIFY_EXPR, TREE_TYPE (p1), p1, t);
      append_to_statement_list (t, &ret);

      if (arg2)
        {
          t = TYPE_SIZE_UNIT (inner_type);
          t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (p2), p2, t);
          t = build2 (MODIFY_EXPR, TREE_TYPE (p2), p2, t);
          append_to_statement_list (t, &ret);
        }

      t = build2 (NE_EXPR, boolean_type_node, p1, end1);
      t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&lab), NULL);
      append_to_statement_list (t, &ret);

      return ret;
    }
  else
    {
      argarray[i++] = build_fold_addr_expr (arg1);
      if (arg2)
        argarray[i++] = build_fold_addr_expr (arg2);
      /* Handle default arguments.  */
      for (parm = defparm; parm && parm != void_list_node;
           parm = TREE_CHAIN (parm), i++)
        argarray[i] = convert_default_arg (TREE_VALUE (parm),
                                           TREE_PURPOSE (parm), fn, i);
      t = build_call_a (fn, i, argarray);
      t = fold_convert (void_type_node, t);
      return fold_build_cleanup_point_expr (TREE_TYPE (t), t);
    }
}
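/* Hedged illustration of the loop shape built for the ARRAY_TYPE case above:
   a label/goto loop that walks every element of the flattened array and
   applies FN (a constructor, destructor, or assignment helper) to each.
   Names and the single-argument signature are illustrative. */
#include <stddef.h>

void apply_fn_over_array (void (*fn) (char *), char *arg1,
                          size_t total_size, size_t elt_size)
{
  char *p1 = arg1;                      /* start1 */
  char *end1 = arg1 + total_size;       /* TYPE_SIZE_UNIT (TREE_TYPE (arg1)) */
 lab:
  fn (p1);                              /* the built call */
  p1 += elt_size;                       /* TYPE_SIZE_UNIT (inner_type) */
  if (p1 != end1)
    goto lab;                           /* COND_EXPR + build_and_jump */
}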
static void
mf_build_check_statement_for (tree base, tree limit,
                              block_stmt_iterator *instr_bsi,
                              location_t *locus, tree dirflag)
{
  tree_stmt_iterator head, tsi;
  block_stmt_iterator bsi;
  basic_block cond_bb, then_bb, join_bb;
  edge e;
  tree cond, t, u, v;
  tree mf_base;
  tree mf_elem;
  tree mf_limit;

  /* We first need to split the current basic block, and start altering
     the CFG.  This allows us to insert the statements we're about to
     construct into the right basic blocks.  */

  cond_bb = bb_for_stmt (bsi_stmt (*instr_bsi));
  bsi = *instr_bsi;
  bsi_prev (&bsi);
  if (! bsi_end_p (bsi))
    e = split_block (cond_bb, bsi_stmt (bsi));
  else
    e = split_block_after_labels (cond_bb);
  cond_bb = e->src;
  join_bb = e->dest;

  /* A recap at this point: join_bb is the basic block at whose head
     is the gimple statement for which this check expression is being
     built.  cond_bb is the (possibly new, synthetic) basic block the
     end of which will contain the cache-lookup code, and a
     conditional that jumps to the cache-miss code or, much more
     likely, over to join_bb.  */

  /* Create the bb that contains the cache-miss fallback block (mf_check).  */
  then_bb = create_empty_bb (cond_bb);
  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
  make_single_succ_edge (then_bb, join_bb, EDGE_FALLTHRU);

  /* Mark the pseudo-fallthrough edge from cond_bb to join_bb.  */
  e = find_edge (cond_bb, join_bb);
  e->flags = EDGE_FALSE_VALUE;
  e->count = cond_bb->count;
  e->probability = REG_BR_PROB_BASE;

  /* Update dominance info.  Note that bb_join's data was updated by
     split_block.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
      set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
    }

  /* Build our local variables.  */
  mf_elem = create_tmp_var (mf_cache_structptr_type, "__mf_elem");
  mf_base = create_tmp_var (mf_uintptr_type, "__mf_base");
  mf_limit = create_tmp_var (mf_uintptr_type, "__mf_limit");

  /* Build: __mf_base = (uintptr_t) <base address expression>.  */
  t = build2 (MODIFY_EXPR, void_type_node, mf_base,
              convert (mf_uintptr_type, unshare_expr (base)));
  SET_EXPR_LOCUS (t, locus);
  gimplify_to_stmt_list (&t);
  head = tsi_start (t);
  tsi = tsi_last (t);

  /* Build: __mf_limit = (uintptr_t) <limit address expression>.  */
  t = build2 (MODIFY_EXPR, void_type_node, mf_limit,
              convert (mf_uintptr_type, unshare_expr (limit)));
  SET_EXPR_LOCUS (t, locus);
  gimplify_to_stmt_list (&t);
  tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING);

  /* Build: __mf_elem = &__mf_lookup_cache [(__mf_base >> __mf_shift)
                                            & __mf_mask].  */
  t = build2 (RSHIFT_EXPR, mf_uintptr_type, mf_base,
              (flag_mudflap_threads ? mf_cache_shift_decl
                                    : mf_cache_shift_decl_l));
  t = build2 (BIT_AND_EXPR, mf_uintptr_type, t,
              (flag_mudflap_threads ? mf_cache_mask_decl
                                    : mf_cache_mask_decl_l));
  t = build4 (ARRAY_REF,
              TREE_TYPE (TREE_TYPE (mf_cache_array_decl)),
              mf_cache_array_decl, t, NULL_TREE, NULL_TREE);
  t = build1 (ADDR_EXPR, mf_cache_structptr_type, t);
  t = build2 (MODIFY_EXPR, void_type_node, mf_elem, t);
  SET_EXPR_LOCUS (t, locus);
  gimplify_to_stmt_list (&t);
  tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING);

  /* Quick validity check.

       if (__mf_elem->low > __mf_base
           || (__mf_elem->high < __mf_limit))
          {
            __mf_check ();
            ... and only if single-threaded:
            __mf_lookup_shift_l = ...;
            __mf_lookup_mask_l = ...;
          }

     It is expected that this body of code is rarely executed so we mark
     the edge to the THEN clause of the conditional jump as unlikely.  */

  /* Construct t <-- '__mf_elem->low > __mf_base'.  */
  t = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              TYPE_FIELDS (mf_cache_struct_type), NULL_TREE);
  t = build2 (GT_EXPR, boolean_type_node, t, mf_base);

  /* Construct '__mf_elem->high < __mf_limit'.

     First build:
       1) u <-- '__mf_elem->high'
       2) v <-- '__mf_limit'.

     Then build 'u <-- (u < v)'.  */

  u = build3 (COMPONENT_REF, mf_uintptr_type,
              build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem),
              TREE_CHAIN (TYPE_FIELDS (mf_cache_struct_type)), NULL_TREE);

  v = mf_limit;

  u = build2 (LT_EXPR, boolean_type_node, u, v);

  /* Build the composed conditional: t <-- 't || u'.  Then store the
     result of the evaluation of 't' in a temporary variable which we
     can use as the condition for the conditional jump.  */
  t = build2 (TRUTH_OR_EXPR, boolean_type_node, t, u);
  cond = create_tmp_var (boolean_type_node, "__mf_unlikely_cond");
  t = build2 (MODIFY_EXPR, boolean_type_node, cond, t);
  gimplify_to_stmt_list (&t);
  tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING);

  /* Build the conditional jump.  'cond' is just a temporary so we can
     simply build a void COND_EXPR.  We do need labels in both arms
     though.  */
  t = build3 (COND_EXPR, void_type_node, cond,
              build1 (GOTO_EXPR, void_type_node,
                      tree_block_label (then_bb)),
              build1 (GOTO_EXPR, void_type_node,
                      tree_block_label (join_bb)));
  SET_EXPR_LOCUS (t, locus);
  tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING);

  /* At this point, after so much hard work, we have only constructed
     the conditional jump,

       if (__mf_elem->low > __mf_base
           || (__mf_elem->high < __mf_limit))

     The lowered GIMPLE tree representing this code is in the statement
     list starting at 'head'.

     We can insert this now in the current basic block, i.e. the one that
     the statement we're instrumenting was originally in.  */
  bsi = bsi_last (cond_bb);
  for (tsi = head; ! tsi_end_p (tsi); tsi_next (&tsi))
    bsi_insert_after (&bsi, tsi_stmt (tsi), BSI_CONTINUE_LINKING);

  /* Now build up the body of the cache-miss handling:

       __mf_check();
       refresh *_l vars.

     This is the body of the conditional.  */

  u = tree_cons (NULL_TREE,
                 mf_file_function_line_tree (locus == NULL ? UNKNOWN_LOCATION
                                                           : *locus),
                 NULL_TREE);
  u = tree_cons (NULL_TREE, dirflag, u);
  /* NB: we pass the overall [base..limit] range to mf_check.  */
  u = tree_cons (NULL_TREE,
                 fold_build2 (PLUS_EXPR, integer_type_node,
                              fold_build2 (MINUS_EXPR, mf_uintptr_type,
                                           mf_limit, mf_base),
                              integer_one_node),
                 u);
  u = tree_cons (NULL_TREE, mf_base, u);
  t = build_function_call_expr (mf_check_fndecl, u);
  gimplify_to_stmt_list (&t);
  head = tsi_start (t);
  tsi = tsi_last (t);

  if (! flag_mudflap_threads)
    {
      t = build2 (MODIFY_EXPR, void_type_node,
                  mf_cache_shift_decl_l, mf_cache_shift_decl);
      tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING);

      t = build2 (MODIFY_EXPR, void_type_node,
                  mf_cache_mask_decl_l, mf_cache_mask_decl);
      tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING);
    }

  /* Insert the check code in the THEN block.  */
  bsi = bsi_start (then_bb);
  for (tsi = head; ! tsi_end_p (tsi); tsi_next (&tsi))
    bsi_insert_after (&bsi, tsi_stmt (tsi), BSI_CONTINUE_LINKING);

  *instr_bsi = bsi_start (join_bb);
  bsi_next (instr_bsi);
}
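/* Hedged source-level picture of the check built above.  The structure and
   symbol names follow the mudflap runtime but the prototypes here are
   written only for illustration. */
#include <stdint.h>

struct mf_cache_entry { uintptr_t low; uintptr_t high; };
extern struct mf_cache_entry __mf_lookup_cache[];
extern uintptr_t __mf_lookup_shift_l, __mf_lookup_mask_l;
extern void __mf_check (void *base, unsigned size, int dirflag,
                        const char *location);

static inline void
mf_check_inline (uintptr_t mf_base, uintptr_t mf_limit, int dirflag)
{
  struct mf_cache_entry *__mf_elem
    = &__mf_lookup_cache[(mf_base >> __mf_lookup_shift_l)
                         & __mf_lookup_mask_l];

  if (__mf_elem->low > mf_base || __mf_elem->high < mf_limit)
    /* Cache miss: fall back to the full check over [base..limit].  */
    __mf_check ((void *) mf_base, mf_limit - mf_base + 1, dirflag, "...");
}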
static tree
fix_builtin_array_notation_fn (tree an_builtin_fn, tree *new_var)
{
  tree new_var_type = NULL_TREE, func_parm, new_expr, new_yes_expr, new_no_expr;
  tree array_ind_value = NULL_TREE, new_no_ind, new_yes_ind, new_no_list;
  tree new_yes_list, new_cond_expr, new_var_init = NULL_TREE;
  tree new_exp_init = NULL_TREE;
  vec<tree, va_gc> *array_list = NULL, *array_operand = NULL;
  size_t list_size = 0, rank = 0, ii = 0;
  tree loop_init, array_op0;
  tree identity_value = NULL_TREE, call_fn = NULL_TREE, new_call_expr, body;
  location_t location = UNKNOWN_LOCATION;
  tree loop_with_init = alloc_stmt_list ();
  vec<vec<an_parts> > an_info = vNULL;
  vec<an_loop_parts> an_loop_info = vNULL;
  enum built_in_function an_type
    = is_cilkplus_reduce_builtin (CALL_EXPR_FN (an_builtin_fn));
  if (an_type == BUILT_IN_NONE)
    return NULL_TREE;

  /* Builtin call should contain at least one argument.  */
  if (call_expr_nargs (an_builtin_fn) == 0)
    {
      error_at (EXPR_LOCATION (an_builtin_fn), "Invalid builtin arguments");
      return error_mark_node;
    }

  if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE
      || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING)
    {
      call_fn = CALL_EXPR_ARG (an_builtin_fn, 2);
      if (TREE_CODE (call_fn) == ADDR_EXPR)
        call_fn = TREE_OPERAND (call_fn, 0);
      identity_value = CALL_EXPR_ARG (an_builtin_fn, 0);
      func_parm = CALL_EXPR_ARG (an_builtin_fn, 1);
    }
  else
    func_parm = CALL_EXPR_ARG (an_builtin_fn, 0);

  /* Fully fold any EXCESSIVE_PRECISION EXPR that can occur in the function
     parameter.  */
  func_parm = c_fully_fold (func_parm, false, NULL);
  if (func_parm == error_mark_node)
    return error_mark_node;

  location = EXPR_LOCATION (an_builtin_fn);

  if (!find_rank (location, an_builtin_fn, an_builtin_fn, true, &rank))
    return error_mark_node;

  if (rank == 0)
    {
      error_at (location, "Invalid builtin arguments");
      return error_mark_node;
    }
  else if (rank > 1
           && (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
               || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND))
    {
      error_at (location, "__sec_reduce_min_ind or __sec_reduce_max_ind cannot"
                " have arrays with dimension greater than 1");
      return error_mark_node;
    }

  extract_array_notation_exprs (func_parm, true, &array_list);
  list_size = vec_safe_length (array_list);
  switch (an_type)
    {
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ADD:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUL:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN:
      new_var_type = TREE_TYPE ((*array_list)[0]);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_NONZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_NONZERO:
      new_var_type = integer_type_node;
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND:
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND:
      new_var_type = integer_type_node;
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE:
      if (call_fn && identity_value)
        new_var_type = TREE_TYPE ((*array_list)[0]);
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING:
      new_var_type = NULL_TREE;
      break;
    default:
      gcc_unreachable ();
    }

  an_loop_info.safe_grow_cleared (rank);
  cilkplus_extract_an_triplets (array_list, list_size, rank, &an_info);

  loop_init = alloc_stmt_list ();
  for (ii = 0; ii < rank; ii++)
    {
      an_loop_info[ii].var = create_tmp_var (integer_type_node);
      an_loop_info[ii].ind_init
        = build_modify_expr (location, an_loop_info[ii].var,
                             TREE_TYPE (an_loop_info[ii].var), NOP_EXPR,
                             location,
                             build_int_cst (TREE_TYPE (an_loop_info[ii].var),
                                            0),
                             TREE_TYPE (an_loop_info[ii].var));
    }
  array_operand = create_array_refs (location, an_info, an_loop_info,
                                     list_size, rank);
  replace_array_notations (&func_parm, true, array_list, array_operand);
  create_cmp_incr (location, &an_loop_info, rank, an_info);

  if (an_type != BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING)
    {
      *new_var = build_decl (location, VAR_DECL, NULL_TREE, new_var_type);
      gcc_assert (*new_var && *new_var != error_mark_node);
    }
  else
    *new_var = NULL_TREE;

  if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
      || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND)
    array_ind_value = build_decl (location, VAR_DECL, NULL_TREE,
                                  TREE_TYPE (func_parm));

  array_op0 = (*array_operand)[0];

  if (TREE_CODE (array_op0) == INDIRECT_REF)
    array_op0 = TREE_OPERAND (array_op0, 0);

  switch (an_type)
    {
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ADD:
      new_var_init = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, build_zero_cst (new_var_type), new_var_type);
      new_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), PLUS_EXPR,
         location, func_parm, TREE_TYPE (func_parm));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUL:
      new_var_init = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, build_one_cst (new_var_type), new_var_type);
      new_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), MULT_EXPR,
         location, func_parm, TREE_TYPE (func_parm));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO:
      new_var_init = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, build_one_cst (new_var_type), new_var_type);
      /* Initially you assume everything is zero, now if we find a case where
         it is NOT true, then we set the result to false.  Otherwise we just
         keep the previous value.  */
      new_yes_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, build_zero_cst (TREE_TYPE (*new_var)),
         TREE_TYPE (*new_var));
      new_no_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, *new_var, TREE_TYPE (*new_var));
      new_cond_expr = build2 (NE_EXPR, TREE_TYPE (func_parm), func_parm,
                              build_zero_cst (TREE_TYPE (func_parm)));
      new_expr = build_conditional_expr
        (location, new_cond_expr, false,
         new_yes_expr, TREE_TYPE (new_yes_expr),
         new_no_expr, TREE_TYPE (new_no_expr));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_NONZERO:
      new_var_init = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, build_one_cst (new_var_type), new_var_type);
      /* Initially you assume everything is non-zero, now if we find a case
         where it is NOT true, then we set the result to false.  Otherwise we
         just keep the previous value.  */
      new_yes_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, build_zero_cst (TREE_TYPE (*new_var)),
         TREE_TYPE (*new_var));
      new_no_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, *new_var, TREE_TYPE (*new_var));
      new_cond_expr = build2 (EQ_EXPR, TREE_TYPE (func_parm), func_parm,
                              build_zero_cst (TREE_TYPE (func_parm)));
      new_expr = build_conditional_expr
        (location, new_cond_expr, false,
         new_yes_expr, TREE_TYPE (new_yes_expr),
         new_no_expr, TREE_TYPE (new_no_expr));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO:
      new_var_init = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, build_zero_cst (new_var_type), new_var_type);
      /* Initially we assume there are NO zeros in the list.  When we find a
         non-zero, we keep the previous value.  If we find a zero, we set the
         value to true.  */
      new_yes_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, build_one_cst (new_var_type), new_var_type);
      new_no_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, *new_var, TREE_TYPE (*new_var));
      new_cond_expr = build2 (EQ_EXPR, TREE_TYPE (func_parm), func_parm,
                              build_zero_cst (TREE_TYPE (func_parm)));
      new_expr = build_conditional_expr
        (location, new_cond_expr, false,
         new_yes_expr, TREE_TYPE (new_yes_expr),
         new_no_expr, TREE_TYPE (new_no_expr));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_NONZERO:
      new_var_init = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, build_zero_cst (new_var_type), new_var_type);
      /* Initially we assume there are NO non-zeros in the list.  When we find
         a zero, we keep the previous value.  If we find a non-zero, we set
         the value to true.  */
      new_yes_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, build_one_cst (new_var_type), new_var_type);
      new_no_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, *new_var, TREE_TYPE (*new_var));
      new_cond_expr = build2 (NE_EXPR, TREE_TYPE (func_parm), func_parm,
                              build_zero_cst (TREE_TYPE (func_parm)));
      new_expr = build_conditional_expr
        (location, new_cond_expr, false,
         new_yes_expr, TREE_TYPE (new_yes_expr),
         new_no_expr, TREE_TYPE (new_no_expr));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX:
      if (TYPE_MIN_VALUE (new_var_type))
        new_var_init = build_modify_expr
          (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
           location, TYPE_MIN_VALUE (new_var_type), new_var_type);
      else
        new_var_init = build_modify_expr
          (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
           location, func_parm, new_var_type);
      new_no_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, *new_var, TREE_TYPE (*new_var));
      new_yes_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, func_parm, TREE_TYPE (*new_var));
      new_expr = build_conditional_expr
        (location,
         build2 (LT_EXPR, TREE_TYPE (*new_var), *new_var, func_parm), false,
         new_yes_expr, TREE_TYPE (*new_var),
         new_no_expr, TREE_TYPE (*new_var));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN:
      if (TYPE_MAX_VALUE (new_var_type))
        new_var_init = build_modify_expr
          (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
           location, TYPE_MAX_VALUE (new_var_type), new_var_type);
      else
        new_var_init = build_modify_expr
          (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
           location, func_parm, new_var_type);
      new_no_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, *new_var, TREE_TYPE (*new_var));
      new_yes_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, func_parm, TREE_TYPE (*new_var));
      new_expr = build_conditional_expr
        (location,
         build2 (GT_EXPR, TREE_TYPE (*new_var), *new_var, func_parm), false,
         new_yes_expr, TREE_TYPE (*new_var),
         new_no_expr, TREE_TYPE (*new_var));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND:
      new_var_init = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, build_zero_cst (new_var_type), new_var_type);
      new_exp_init = build_modify_expr
        (location, array_ind_value, TREE_TYPE (array_ind_value),
         NOP_EXPR, location, func_parm, TREE_TYPE (func_parm));
      new_no_ind = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, *new_var, TREE_TYPE (*new_var));
      new_no_expr = build_modify_expr
        (location, array_ind_value, TREE_TYPE (array_ind_value),
         NOP_EXPR, location, array_ind_value, TREE_TYPE (array_ind_value));
      if (list_size > 1)
        {
          new_yes_ind = build_modify_expr
            (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
             location, an_loop_info[0].var, TREE_TYPE (an_loop_info[0].var));
          new_yes_expr = build_modify_expr
            (location, array_ind_value, TREE_TYPE (array_ind_value),
             NOP_EXPR, location, func_parm, TREE_TYPE ((*array_operand)[0]));
        }
      else
        {
          new_yes_ind = build_modify_expr
            (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
             location, TREE_OPERAND (array_op0, 1),
             TREE_TYPE (TREE_OPERAND (array_op0, 1)));
          new_yes_expr = build_modify_expr
            (location, array_ind_value, TREE_TYPE (array_ind_value),
             NOP_EXPR, location, func_parm, TREE_OPERAND (array_op0, 1));
        }
      new_yes_list = alloc_stmt_list ();
      append_to_statement_list (new_yes_ind, &new_yes_list);
      append_to_statement_list (new_yes_expr, &new_yes_list);

      new_no_list = alloc_stmt_list ();
      append_to_statement_list (new_no_ind, &new_no_list);
      append_to_statement_list (new_no_expr, &new_no_list);

      new_expr = build_conditional_expr
        (location,
         build2 (LE_EXPR, TREE_TYPE (array_ind_value), array_ind_value,
                 func_parm), false,
         new_yes_list, TREE_TYPE (*new_var),
         new_no_list, TREE_TYPE (*new_var));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND:
      new_var_init = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, build_zero_cst (new_var_type), new_var_type);
      new_exp_init = build_modify_expr
        (location, array_ind_value, TREE_TYPE (array_ind_value),
         NOP_EXPR, location, func_parm, TREE_TYPE (func_parm));
      new_no_ind = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, *new_var, TREE_TYPE (*new_var));
      new_no_expr = build_modify_expr
        (location, array_ind_value, TREE_TYPE (array_ind_value),
         NOP_EXPR, location, array_ind_value, TREE_TYPE (array_ind_value));
      if (list_size > 1)
        {
          new_yes_ind = build_modify_expr
            (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
             location, an_loop_info[0].var, TREE_TYPE (an_loop_info[0].var));
          new_yes_expr = build_modify_expr
            (location, array_ind_value, TREE_TYPE (array_ind_value),
             NOP_EXPR, location, func_parm, TREE_TYPE (array_op0));
        }
      else
        {
          new_yes_ind = build_modify_expr
            (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
             location, TREE_OPERAND (array_op0, 1),
             TREE_TYPE (TREE_OPERAND (array_op0, 1)));
          new_yes_expr = build_modify_expr
            (location, array_ind_value, TREE_TYPE (array_ind_value),
             NOP_EXPR, location, func_parm, TREE_OPERAND (array_op0, 1));
        }
      new_yes_list = alloc_stmt_list ();
      append_to_statement_list (new_yes_ind, &new_yes_list);
      append_to_statement_list (new_yes_expr, &new_yes_list);

      new_no_list = alloc_stmt_list ();
      append_to_statement_list (new_no_ind, &new_no_list);
      append_to_statement_list (new_no_expr, &new_no_list);

      new_expr = build_conditional_expr
        (location,
         build2 (GE_EXPR, TREE_TYPE (array_ind_value), array_ind_value,
                 func_parm), false,
         new_yes_list, TREE_TYPE (*new_var),
         new_no_list, TREE_TYPE (*new_var));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE:
      new_var_init = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, identity_value, new_var_type);
      new_call_expr = build_call_expr (call_fn, 2, *new_var, func_parm);
      new_expr = build_modify_expr
        (location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
         location, new_call_expr, TREE_TYPE (*new_var));
      break;
    case BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING:
      new_expr = build_call_expr (call_fn, 2, identity_value, func_parm);
      break;
    default:
      gcc_unreachable ();
      break;
    }

  for (ii = 0; ii < rank; ii++)
    append_to_statement_list (an_loop_info[ii].ind_init, &loop_init);

  if (an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MAX_IND
      || an_type == BUILT_IN_CILKPLUS_SEC_REDUCE_MIN_IND)
    append_to_statement_list (new_exp_init, &loop_init);
  if (an_type != BUILT_IN_CILKPLUS_SEC_REDUCE_MUTATING)
    append_to_statement_list (new_var_init, &loop_init);

  append_to_statement_list_force (loop_init, &loop_with_init);
  body = new_expr;
  for (ii = 0; ii < rank; ii++)
    {
      tree new_loop = push_stmt_list ();
      c_finish_loop (location, an_loop_info[ii].cmp, an_loop_info[ii].incr,
                     body, NULL_TREE, NULL_TREE, true);
      body = pop_stmt_list (new_loop);
    }
  append_to_statement_list_force (body, &loop_with_init);

  an_info.release ();
  an_loop_info.release ();

  return loop_with_init;
}
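/* Hedged source-level picture of the lowering above for one reduction
   builtin: __sec_reduce_add (a[0:n]) becomes an init of the reduction
   temporary followed by a counted loop.  Names are illustrative. */
int sec_reduce_add_lowered (int n, const int *a)
{
  int new_var = 0;                      /* new_var_init */
  for (int ii = 0; ii < n; ii++)        /* ind_init / cmp / incr */
    new_var = new_var + a[ii];          /* new_expr with PLUS_EXPR */
  return new_var;
}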
tree build_array_notation_expr (location_t location, tree lhs, tree lhs_origtype, enum tree_code modifycode, location_t rhs_loc, tree rhs, tree rhs_origtype) { bool found_builtin_fn = false; tree array_expr_lhs = NULL_TREE, array_expr_rhs = NULL_TREE; tree array_expr = NULL_TREE; tree an_init = NULL_TREE; vec<tree> cond_expr = vNULL; tree body, loop_with_init = alloc_stmt_list(); tree scalar_mods = NULL_TREE; vec<tree, va_gc> *rhs_array_operand = NULL, *lhs_array_operand = NULL; size_t lhs_rank = 0, rhs_rank = 0; size_t ii = 0; vec<tree, va_gc> *lhs_list = NULL, *rhs_list = NULL; tree new_modify_expr, new_var = NULL_TREE, builtin_loop = NULL_TREE; size_t rhs_list_size = 0, lhs_list_size = 0; vec<vec<an_parts> > lhs_an_info = vNULL, rhs_an_info = vNULL; vec<an_loop_parts> lhs_an_loop_info = vNULL, rhs_an_loop_info = vNULL; /* If either of this is true, an error message must have been send out already. Not necessary to send out multiple error messages. */ if (lhs == error_mark_node || rhs == error_mark_node) return error_mark_node; if (!find_rank (location, rhs, rhs, false, &rhs_rank)) return error_mark_node; extract_array_notation_exprs (rhs, false, &rhs_list); rhs_list_size = vec_safe_length (rhs_list); an_init = push_stmt_list (); if (rhs_rank) { scalar_mods = replace_invariant_exprs (&rhs); if (scalar_mods) add_stmt (scalar_mods); } for (ii = 0; ii < rhs_list_size; ii++) { tree rhs_node = (*rhs_list)[ii]; if (TREE_CODE (rhs_node) == CALL_EXPR) { builtin_loop = fix_builtin_array_notation_fn (rhs_node, &new_var); if (builtin_loop == error_mark_node) { pop_stmt_list (an_init); return error_mark_node; } else if (builtin_loop) { add_stmt (builtin_loop); found_builtin_fn = true; if (new_var) { vec<tree, va_gc> *rhs_sub_list = NULL, *new_var_list = NULL; vec_safe_push (rhs_sub_list, rhs_node); vec_safe_push (new_var_list, new_var); replace_array_notations (&rhs, false, rhs_sub_list, new_var_list); } } } } lhs_rank = 0; rhs_rank = 0; if (!find_rank (location, lhs, lhs, true, &lhs_rank)) { pop_stmt_list (an_init); return error_mark_node; } if (!find_rank (location, rhs, rhs, true, &rhs_rank)) { pop_stmt_list (an_init); return error_mark_node; } if (lhs_rank == 0 && rhs_rank == 0) { if (found_builtin_fn) { new_modify_expr = build_modify_expr (location, lhs, lhs_origtype, modifycode, rhs_loc, rhs, rhs_origtype); add_stmt (new_modify_expr); pop_stmt_list (an_init); return an_init; } else { pop_stmt_list (an_init); return NULL_TREE; } } rhs_list_size = 0; rhs_list = NULL; extract_array_notation_exprs (rhs, true, &rhs_list); extract_array_notation_exprs (lhs, true, &lhs_list); rhs_list_size = vec_safe_length (rhs_list); lhs_list_size = vec_safe_length (lhs_list); if (lhs_rank == 0 && rhs_rank != 0) { tree rhs_base = rhs; if (TREE_CODE (rhs_base) == ARRAY_NOTATION_REF) { for (ii = 0; ii < (size_t) rhs_rank; ii++) rhs_base = ARRAY_NOTATION_ARRAY (rhs); error_at (location, "%qE cannot be scalar when %qE is not", lhs, rhs_base); return error_mark_node; } else { error_at (location, "%qE cannot be scalar when %qE is not", lhs, rhs_base); return error_mark_node; } } if (lhs_rank != 0 && rhs_rank != 0 && lhs_rank != rhs_rank) { error_at (location, "rank mismatch between %qE and %qE", lhs, rhs); pop_stmt_list (an_init); return error_mark_node; } /* Here we assign the array notation components to variable so that we can satisfy the exec once rule. 
*/ for (ii = 0; ii < lhs_list_size; ii++) { tree array_node = (*lhs_list)[ii]; make_triplet_val_inv (location, &ARRAY_NOTATION_START (array_node)); make_triplet_val_inv (location, &ARRAY_NOTATION_LENGTH (array_node)); make_triplet_val_inv (location, &ARRAY_NOTATION_STRIDE (array_node)); } for (ii = 0; ii < rhs_list_size; ii++) if ((*rhs_list)[ii] && TREE_CODE ((*rhs_list)[ii]) == ARRAY_NOTATION_REF) { tree array_node = (*rhs_list)[ii]; make_triplet_val_inv (location, &ARRAY_NOTATION_START (array_node)); make_triplet_val_inv (location, &ARRAY_NOTATION_LENGTH (array_node)); make_triplet_val_inv (location, &ARRAY_NOTATION_STRIDE (array_node)); } cond_expr.safe_grow_cleared (MAX (lhs_rank, rhs_rank)); lhs_an_loop_info.safe_grow_cleared (lhs_rank); if (rhs_rank) rhs_an_loop_info.safe_grow_cleared (rhs_rank); cilkplus_extract_an_triplets (lhs_list, lhs_list_size, lhs_rank, &lhs_an_info); if (rhs_rank) { rhs_an_loop_info.safe_grow_cleared (rhs_rank); cilkplus_extract_an_triplets (rhs_list, rhs_list_size, rhs_rank, &rhs_an_info); } if (length_mismatch_in_expr_p (EXPR_LOCATION (lhs), lhs_an_info) || (rhs_rank && length_mismatch_in_expr_p (EXPR_LOCATION (rhs), rhs_an_info))) { pop_stmt_list (an_init); return error_mark_node; } if (lhs_list_size > 0 && rhs_list_size > 0 && lhs_rank > 0 && rhs_rank > 0 && TREE_CODE (lhs_an_info[0][0].length) == INTEGER_CST && rhs_an_info[0][0].length && TREE_CODE (rhs_an_info[0][0].length) == INTEGER_CST) { HOST_WIDE_INT l_length = int_cst_value (lhs_an_info[0][0].length); HOST_WIDE_INT r_length = int_cst_value (rhs_an_info[0][0].length); /* Length can be negative or positive. As long as the magnitude is OK, then the array notation is valid. */ if (absu_hwi (l_length) != absu_hwi (r_length)) { error_at (location, "length mismatch between LHS and RHS"); pop_stmt_list (an_init); return error_mark_node; } } for (ii = 0; ii < lhs_rank; ii++) if (lhs_an_info[0][ii].is_vector) { lhs_an_loop_info[ii].var = create_tmp_var (integer_type_node); lhs_an_loop_info[ii].ind_init = build_modify_expr (location, lhs_an_loop_info[ii].var, TREE_TYPE (lhs_an_loop_info[ii].var), NOP_EXPR, location, build_zero_cst (TREE_TYPE (lhs_an_loop_info[ii].var)), TREE_TYPE (lhs_an_loop_info[ii].var)); } for (ii = 0; ii < rhs_rank; ii++) { /* When we have a polynomial, we assume that the indices are of type integer. 
*/ rhs_an_loop_info[ii].var = create_tmp_var (integer_type_node); rhs_an_loop_info[ii].ind_init = build_modify_expr (location, rhs_an_loop_info[ii].var, TREE_TYPE (rhs_an_loop_info[ii].var), NOP_EXPR, location, build_int_cst (TREE_TYPE (rhs_an_loop_info[ii].var), 0), TREE_TYPE (rhs_an_loop_info[ii].var)); } if (lhs_rank) { lhs_array_operand = create_array_refs (location, lhs_an_info, lhs_an_loop_info, lhs_list_size, lhs_rank); replace_array_notations (&lhs, true, lhs_list, lhs_array_operand); array_expr_lhs = lhs; } if (rhs_array_operand) vec_safe_truncate (rhs_array_operand, 0); if (rhs_rank) { rhs_array_operand = create_array_refs (location, rhs_an_info, rhs_an_loop_info, rhs_list_size, rhs_rank); replace_array_notations (&rhs, true, rhs_list, rhs_array_operand); vec_safe_truncate (rhs_array_operand, 0); rhs_array_operand = fix_sec_implicit_args (location, rhs_list, rhs_an_loop_info, rhs_rank, rhs); if (!rhs_array_operand) return error_mark_node; replace_array_notations (&rhs, true, rhs_list, rhs_array_operand); } else if (rhs_list_size > 0) { rhs_array_operand = fix_sec_implicit_args (location, rhs_list, lhs_an_loop_info, lhs_rank, lhs); if (!rhs_array_operand) return error_mark_node; replace_array_notations (&rhs, true, rhs_list, rhs_array_operand); } array_expr_lhs = lhs; array_expr_rhs = rhs; array_expr = build_modify_expr (location, array_expr_lhs, lhs_origtype, modifycode, rhs_loc, array_expr_rhs, rhs_origtype); create_cmp_incr (location, &lhs_an_loop_info, lhs_rank, lhs_an_info); if (rhs_rank) create_cmp_incr (location, &rhs_an_loop_info, rhs_rank, rhs_an_info); for (ii = 0; ii < MAX (lhs_rank, rhs_rank); ii++) if (ii < lhs_rank && ii < rhs_rank) cond_expr[ii] = build2 (TRUTH_ANDIF_EXPR, boolean_type_node, lhs_an_loop_info[ii].cmp, rhs_an_loop_info[ii].cmp); else if (ii < lhs_rank && ii >= rhs_rank) cond_expr[ii] = lhs_an_loop_info[ii].cmp; else gcc_unreachable (); an_init = pop_stmt_list (an_init); append_to_statement_list_force (an_init, &loop_with_init); body = array_expr; for (ii = 0; ii < MAX (lhs_rank, rhs_rank); ii++) { tree incr_list = alloc_stmt_list (); tree new_loop = push_stmt_list (); if (lhs_rank) add_stmt (lhs_an_loop_info[ii].ind_init); if (rhs_rank) add_stmt (rhs_an_loop_info[ii].ind_init); if (lhs_rank) append_to_statement_list_force (lhs_an_loop_info[ii].incr, &incr_list); if (rhs_rank && rhs_an_loop_info[ii].incr) append_to_statement_list_force (rhs_an_loop_info[ii].incr, &incr_list); c_finish_loop (location, cond_expr[ii], incr_list, body, NULL_TREE, NULL_TREE, true); body = pop_stmt_list (new_loop); } append_to_statement_list_force (body, &loop_with_init); lhs_an_info.release (); lhs_an_loop_info.release (); if (rhs_rank) { rhs_an_info.release (); rhs_an_loop_info.release (); } cond_expr.release (); return loop_with_init; }
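/* Taken as a whole, build_array_notation_expr turns an array-notation
   assignment into the loop nest returned in loop_with_init.  A minimal
   sketch for a rank-1 statement, with illustrative index names:

       a[0:n] = b[0:n] + c[0:n];

   becomes, conceptually:

       for (int i0 = 0; i0 < n; i0++)
         a[0 + i0 * 1] = b[0 + i0 * 1] + c[0 + i0 * 1];

   with each start/length/stride triplet first copied into an invariant
   temporary (make_triplet_val_inv) so it is evaluated exactly once.  */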
static void gen_alloca_stmts(gimple_seq* stmts_, tree* sp_)
{
  /* doc: tree-complex.c, c-common.c, omp-low.c */

  /* create the stmt sequence so that:
     *sp = alloca(sizeof(type));
     type: the type to be stacked
     stmts: the allocation sequence
     sp: the allocated area ptr
   */

  gimple call;
  gimple_seq stmts;
  tree binop;
  tree mul;
  tree val;
  tree count;
  tree sizeuf;
  tree sp;
  tree ref;
  gimple assign;
  gimple_stmt_iterator gsi;

  /* build an empty sequence */
  stmts = gimple_seq_alloc();
  gsi = gsi_last(stmts);

#if 0 /* FIXME: a function-local temporary would be preferable here */
  sp = create_tmp_var(ptr_type_node, NULL);
#else /* from varpool.c */
  sp = add_new_static_var(ptr_type_node);
#endif

  /* sp = __builtin_alloca(count * sizeof(uintptr_t)); */
  count = build_int_cst(integer_type_node, 2);
  sizeuf = build_int_cst(integer_type_node, sizeof(uintptr_t));
  mul = build2(MULT_EXPR, integer_type_node, count, sizeuf);
  /* call = gimple_build_call(built_in_decls[BUILT_IN_ALLOCA], 1, mul); */
  call = gimple_build_call(kaapi_pushdata_aligned_decl, 1, mul);
  gimple_call_set_lhs(call, sp);
  gsi_insert_after(&gsi, call, GSI_CONTINUE_LINKING);

  /* *(sp + 0) = 0xdeadc0d3; (first marker word) */
  ref = build1(INDIRECT_REF, TREE_TYPE(sp), sp);
  val = build_int_cst(ptr_type_node, 0xdeadc0d3);
  assign = gimple_build_assign(ref, val);
  gsi_insert_after(&gsi, assign, GSI_CONTINUE_LINKING);

  /* *(sp + sizeof(uintptr_t)) = 0xdeadc003; (second marker word) */
  binop = build_binary_op(0, PLUS_EXPR, convert(integer_type_node, sp),
                          sizeuf, 0);
  ref = build1(INDIRECT_REF, TREE_TYPE(binop), binop);
  val = build_int_cst(ptr_type_node, 0xdeadc003);
  assign = gimple_build_assign(ref, val);
  gsi_insert_after(&gsi, assign, GSI_CONTINUE_LINKING);

  /* store the results into the output parameters */
  *sp_ = sp;
  *stmts_ = stmts;
}
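/* A caller is expected to splice the returned sequence into the function
   being instrumented and then use sp as the base of the pushed area.  A
   minimal usage sketch, assuming a pass that already holds a statement
   iterator gsi at the insertion point (the iterator handling here is
   illustrative, not part of this plugin):

       gimple_seq stmts;
       tree sp;

       gen_alloca_stmts(&stmts, &sp);
       // splice the allocation sequence before the current statement
       gsi_insert_seq_before(&gsi, stmts, GSI_SAME_STMT);
       // sp now names the pushed area and can be handed to the runtime
*/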
static unsigned int lower_function_body (void) { struct lower_data data; gimple_seq body = gimple_body (current_function_decl); gimple_seq lowered_body; gimple_stmt_iterator i; gimple bind; tree t; gimple x; /* The gimplifier should've left a body of exactly one statement, namely a GIMPLE_BIND. */ gcc_assert (gimple_seq_first (body) == gimple_seq_last (body) && gimple_code (gimple_seq_first_stmt (body)) == GIMPLE_BIND); memset (&data, 0, sizeof (data)); data.block = DECL_INITIAL (current_function_decl); BLOCK_SUBBLOCKS (data.block) = NULL_TREE; BLOCK_CHAIN (data.block) = NULL_TREE; TREE_ASM_WRITTEN (data.block) = 1; data.return_statements.create (8); bind = gimple_seq_first_stmt (body); lowered_body = NULL; gimple_seq_add_stmt (&lowered_body, bind); i = gsi_start (lowered_body); lower_gimple_bind (&i, &data); i = gsi_last (lowered_body); /* If the function falls off the end, we need a null return statement. If we've already got one in the return_statements vector, we don't need to do anything special. Otherwise build one by hand. */ if (gimple_seq_may_fallthru (lowered_body) && (data.return_statements.is_empty () || gimple_return_retval (data.return_statements.last().stmt) != NULL)) { x = gimple_build_return (NULL); gimple_set_location (x, cfun->function_end_locus); gimple_set_block (x, DECL_INITIAL (current_function_decl)); gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); } /* If we lowered any return statements, emit the representative at the end of the function. */ while (!data.return_statements.is_empty ()) { return_statements_t t = data.return_statements.pop (); x = gimple_build_label (t.label); gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); gsi_insert_after (&i, t.stmt, GSI_CONTINUE_LINKING); } /* If the function calls __builtin_setjmp, we need to emit the computed goto that will serve as the unique dispatcher for all the receivers. */ if (data.calls_builtin_setjmp) { tree disp_label, disp_var, arg; /* Build 'DISP_LABEL:' and insert. */ disp_label = create_artificial_label (cfun->function_end_locus); /* This mark will create forward edges from every call site. */ DECL_NONLOCAL (disp_label) = 1; cfun->has_nonlocal_label = 1; x = gimple_build_label (disp_label); gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); /* Build 'DISP_VAR = __builtin_setjmp_dispatcher (DISP_LABEL);' and insert. */ disp_var = create_tmp_var (ptr_type_node, "setjmpvar"); arg = build_addr (disp_label, current_function_decl); t = builtin_decl_implicit (BUILT_IN_SETJMP_DISPATCHER); x = gimple_build_call (t, 1, arg); gimple_call_set_lhs (x, disp_var); /* Build 'goto DISP_VAR;' and insert. */ gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); x = gimple_build_goto (disp_var); gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); } /* Once the old body has been lowered, replace it with the new lowered sequence. */ gimple_set_body (current_function_decl, lowered_body); gcc_assert (data.block == DECL_INITIAL (current_function_decl)); BLOCK_SUBBLOCKS (data.block) = blocks_nreverse (BLOCK_SUBBLOCKS (data.block)); clear_block_marks (data.block); data.return_statements.release (); return 0; }
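/* The dispatcher block built above is, at the source level, a label whose
   address is taken plus a computed goto.  A self-contained GNU C analogue
   of that shape (illustrative only; this is not the pass's actual
   output):  */

#include <stdio.h>

int dispatcher_demo(void)
{
  void *disp_var = &&receiver;  /* like DISP_VAR = __builtin_setjmp_dispatcher (DISP_LABEL) */
  goto *disp_var;               /* the emitted 'goto DISP_VAR;' */
receiver:                       /* a receiver label, target of the computed goto */
  puts("reached receiver via computed goto");
  return 0;
}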
void omp_extract_for_data (gomp_for *for_stmt, struct omp_for_data *fd, struct omp_for_data_loop *loops) { tree t, var, *collapse_iter, *collapse_count; tree count = NULL_TREE, iter_type = long_integer_type_node; struct omp_for_data_loop *loop; int i; struct omp_for_data_loop dummy_loop; location_t loc = gimple_location (for_stmt); bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD; bool distribute = gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE; bool taskloop = gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_TASKLOOP; tree iterv, countv; fd->for_stmt = for_stmt; fd->pre = NULL; if (gimple_omp_for_collapse (for_stmt) > 1) fd->loops = loops; else fd->loops = &fd->loop; fd->have_nowait = distribute || simd; fd->have_ordered = false; fd->collapse = 1; fd->ordered = 0; fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC; fd->sched_modifiers = 0; fd->chunk_size = NULL_TREE; fd->simd_schedule = false; if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR) fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR; collapse_iter = NULL; collapse_count = NULL; for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t)) switch (OMP_CLAUSE_CODE (t)) { case OMP_CLAUSE_NOWAIT: fd->have_nowait = true; break; case OMP_CLAUSE_ORDERED: fd->have_ordered = true; if (OMP_CLAUSE_ORDERED_EXPR (t)) fd->ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (t)); break; case OMP_CLAUSE_SCHEDULE: gcc_assert (!distribute && !taskloop); fd->sched_kind = (enum omp_clause_schedule_kind) (OMP_CLAUSE_SCHEDULE_KIND (t) & OMP_CLAUSE_SCHEDULE_MASK); fd->sched_modifiers = (OMP_CLAUSE_SCHEDULE_KIND (t) & ~OMP_CLAUSE_SCHEDULE_MASK); fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t); fd->simd_schedule = OMP_CLAUSE_SCHEDULE_SIMD (t); break; case OMP_CLAUSE_DIST_SCHEDULE: gcc_assert (distribute); fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t); break; case OMP_CLAUSE_COLLAPSE: fd->collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (t)); if (fd->collapse > 1) { collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t); collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t); } break; default: break; } if (fd->ordered && fd->collapse == 1 && loops != NULL) { fd->loops = loops; iterv = NULL_TREE; countv = NULL_TREE; collapse_iter = &iterv; collapse_count = &countv; } /* FIXME: for now map schedule(auto) to schedule(static). There should be analysis to determine whether all iterations are approximately the same amount of work (then schedule(static) is best) or if it varies (then schedule(dynamic,N) is better). */ if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO) { fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC; gcc_assert (fd->chunk_size == NULL); } gcc_assert (fd->collapse == 1 || collapse_iter != NULL); if (taskloop) fd->sched_kind = OMP_CLAUSE_SCHEDULE_RUNTIME; if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME) gcc_assert (fd->chunk_size == NULL); else if (fd->chunk_size == NULL) { /* We only need to compute a default chunk size for ordered static loops and dynamic loops. */ if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered) fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC) ? integer_zero_node : integer_one_node; } int cnt = fd->ordered ? 
fd->ordered : fd->collapse; for (i = 0; i < cnt; i++) { if (i == 0 && fd->collapse == 1 && (fd->ordered == 0 || loops == NULL)) loop = &fd->loop; else if (loops != NULL) loop = loops + i; else loop = &dummy_loop; loop->v = gimple_omp_for_index (for_stmt, i); gcc_assert (SSA_VAR_P (loop->v)); gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE); var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v; loop->n1 = gimple_omp_for_initial (for_stmt, i); loop->cond_code = gimple_omp_for_cond (for_stmt, i); loop->n2 = gimple_omp_for_final (for_stmt, i); gcc_assert (loop->cond_code != NE_EXPR || gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_CILKSIMD || gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_CILKFOR); omp_adjust_for_condition (loc, &loop->cond_code, &loop->n2); t = gimple_omp_for_incr (for_stmt, i); gcc_assert (TREE_OPERAND (t, 0) == var); loop->step = omp_get_for_step_from_incr (loc, t); if (simd || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC && !fd->have_ordered)) { if (fd->collapse == 1) iter_type = TREE_TYPE (loop->v); else if (i == 0 || TYPE_PRECISION (iter_type) < TYPE_PRECISION (TREE_TYPE (loop->v))) iter_type = build_nonstandard_integer_type (TYPE_PRECISION (TREE_TYPE (loop->v)), 1); } else if (iter_type != long_long_unsigned_type_node) { if (POINTER_TYPE_P (TREE_TYPE (loop->v))) iter_type = long_long_unsigned_type_node; else if (TYPE_UNSIGNED (TREE_TYPE (loop->v)) && TYPE_PRECISION (TREE_TYPE (loop->v)) >= TYPE_PRECISION (iter_type)) { tree n; if (loop->cond_code == LT_EXPR) n = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->v), loop->n2, loop->step); else n = loop->n1; if (TREE_CODE (n) != INTEGER_CST || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n)) iter_type = long_long_unsigned_type_node; } else if (TYPE_PRECISION (TREE_TYPE (loop->v)) > TYPE_PRECISION (iter_type)) { tree n1, n2; if (loop->cond_code == LT_EXPR) { n1 = loop->n1; n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->v), loop->n2, loop->step); } else { n1 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (loop->v), loop->n2, loop->step); n2 = loop->n1; } if (TREE_CODE (n1) != INTEGER_CST || TREE_CODE (n2) != INTEGER_CST || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1) || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type))) iter_type = long_long_unsigned_type_node; } } if (i >= fd->collapse) continue; if (collapse_count && *collapse_count == NULL) { t = fold_binary (loop->cond_code, boolean_type_node, fold_convert (TREE_TYPE (loop->v), loop->n1), fold_convert (TREE_TYPE (loop->v), loop->n2)); if (t && integer_zerop (t)) count = build_zero_cst (long_long_unsigned_type_node); else if ((i == 0 || count != NULL_TREE) && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE && TREE_CONSTANT (loop->n1) && TREE_CONSTANT (loop->n2) && TREE_CODE (loop->step) == INTEGER_CST) { tree itype = TREE_TYPE (loop->v); if (POINTER_TYPE_P (itype)) itype = signed_type_for (itype); t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? 
-1 : 1)); t = fold_build2_loc (loc, PLUS_EXPR, itype, fold_convert_loc (loc, itype, loop->step), t); t = fold_build2_loc (loc, PLUS_EXPR, itype, t, fold_convert_loc (loc, itype, loop->n2)); t = fold_build2_loc (loc, MINUS_EXPR, itype, t, fold_convert_loc (loc, itype, loop->n1)); if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR) t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, fold_build1_loc (loc, NEGATE_EXPR, itype, t), fold_build1_loc (loc, NEGATE_EXPR, itype, fold_convert_loc (loc, itype, loop->step))); else t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t, fold_convert_loc (loc, itype, loop->step)); t = fold_convert_loc (loc, long_long_unsigned_type_node, t); if (count != NULL_TREE) count = fold_build2_loc (loc, MULT_EXPR, long_long_unsigned_type_node, count, t); else count = t; if (TREE_CODE (count) != INTEGER_CST) count = NULL_TREE; } else if (count && !integer_zerop (count)) count = NULL_TREE; } } if (count && !simd && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)) { if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node))) iter_type = long_long_unsigned_type_node; else iter_type = long_integer_type_node; } else if (collapse_iter && *collapse_iter != NULL) iter_type = TREE_TYPE (*collapse_iter); fd->iter_type = iter_type; if (collapse_iter && *collapse_iter == NULL) *collapse_iter = create_tmp_var (iter_type, ".iter"); if (collapse_count && *collapse_count == NULL) { if (count) *collapse_count = fold_convert_loc (loc, iter_type, count); else *collapse_count = create_tmp_var (iter_type, ".count"); } if (fd->collapse > 1 || (fd->ordered && loops)) { fd->loop.v = *collapse_iter; fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0); fd->loop.n2 = *collapse_count; fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1); fd->loop.cond_code = LT_EXPR; } else if (loops) loops[0] = fd->loop; }
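/* The constant trip-count computation above is, in plain integer math,
   count = (n2 - n1 + step + adj) / step, where adj is -1 for a '<' loop
   and +1 for a '>' loop.  A quick, self-contained sanity check of the
   '<' case (assumes signed arithmetic with no overflow):  */

#include <stdio.h>

static long trip_count_lt(long n1, long n2, long step)
{
  /* mirrors the PLUS_EXPR/MINUS_EXPR/TRUNC_DIV_EXPR chain for LT_EXPR */
  return (step + -1 + n2 - n1) / step;
}

int main(void)
{
  /* for (i = 0; i < 10; i += 3) runs for i = 0, 3, 6, 9 */
  printf("%ld\n", trip_count_lt(0, 10, 3));  /* prints 4 */
  return 0;
}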
void
aarch64_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  const unsigned AARCH64_FE_INVALID = 1;
  const unsigned AARCH64_FE_DIVBYZERO = 2;
  const unsigned AARCH64_FE_OVERFLOW = 4;
  const unsigned AARCH64_FE_UNDERFLOW = 8;
  const unsigned AARCH64_FE_INEXACT = 16;
  const unsigned HOST_WIDE_INT AARCH64_FE_ALL_EXCEPT = (AARCH64_FE_INVALID
							| AARCH64_FE_DIVBYZERO
							| AARCH64_FE_OVERFLOW
							| AARCH64_FE_UNDERFLOW
							| AARCH64_FE_INEXACT);
  const unsigned HOST_WIDE_INT AARCH64_FE_EXCEPT_SHIFT = 8;
  tree fenv_cr, fenv_sr, get_fpcr, set_fpcr, mask_cr, mask_sr;
  tree ld_fenv_cr, ld_fenv_sr, masked_fenv_cr, masked_fenv_sr, hold_fnclex_cr;
  tree hold_fnclex_sr, new_fenv_var, reload_fenv, restore_fnenv, get_fpsr, set_fpsr;
  tree update_call, atomic_feraiseexcept, hold_fnclex, masked_fenv, ld_fenv;

  /* Generate the equivalent of:
	unsigned int fenv_cr;
	fenv_cr = __builtin_aarch64_get_fpcr ();

	unsigned int fenv_sr;
	fenv_sr = __builtin_aarch64_get_fpsr ();

     Now set all exceptions to non-stop:
	unsigned int mask_cr
	  = ~(AARCH64_FE_ALL_EXCEPT << AARCH64_FE_EXCEPT_SHIFT);
	unsigned int masked_cr;
	masked_cr = fenv_cr & mask_cr;

     And clear all exception flags:
	unsigned int mask_sr = ~AARCH64_FE_ALL_EXCEPT;
	unsigned int masked_sr;
	masked_sr = fenv_sr & mask_sr;

	__builtin_aarch64_set_fpcr (masked_cr);
	__builtin_aarch64_set_fpsr (masked_sr);  */

  fenv_cr = create_tmp_var (unsigned_type_node, NULL);
  fenv_sr = create_tmp_var (unsigned_type_node, NULL);

  get_fpcr = aarch64_builtin_decls[AARCH64_BUILTIN_GET_FPCR];
  set_fpcr = aarch64_builtin_decls[AARCH64_BUILTIN_SET_FPCR];
  get_fpsr = aarch64_builtin_decls[AARCH64_BUILTIN_GET_FPSR];
  set_fpsr = aarch64_builtin_decls[AARCH64_BUILTIN_SET_FPSR];

  mask_cr = build_int_cst (unsigned_type_node,
			   ~(AARCH64_FE_ALL_EXCEPT << AARCH64_FE_EXCEPT_SHIFT));
  mask_sr = build_int_cst (unsigned_type_node,
			   ~(AARCH64_FE_ALL_EXCEPT));

  ld_fenv_cr = build2 (MODIFY_EXPR, unsigned_type_node,
		       fenv_cr, build_call_expr (get_fpcr, 0));
  ld_fenv_sr = build2 (MODIFY_EXPR, unsigned_type_node,
		       fenv_sr, build_call_expr (get_fpsr, 0));

  masked_fenv_cr = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_cr, mask_cr);
  masked_fenv_sr = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_sr, mask_sr);

  hold_fnclex_cr = build_call_expr (set_fpcr, 1, masked_fenv_cr);
  hold_fnclex_sr = build_call_expr (set_fpsr, 1, masked_fenv_sr);

  hold_fnclex = build2 (COMPOUND_EXPR, void_type_node,
			hold_fnclex_cr, hold_fnclex_sr);
  masked_fenv = build2 (COMPOUND_EXPR, void_type_node,
			masked_fenv_cr, masked_fenv_sr);
  ld_fenv = build2 (COMPOUND_EXPR, void_type_node,
		    ld_fenv_cr, ld_fenv_sr);

  *hold = build2 (COMPOUND_EXPR, void_type_node,
		  build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
		  hold_fnclex);

  /* Store the value of masked_fenv to clear the exceptions:
     __builtin_aarch64_set_fpsr (masked_fenv_sr);  */

  *clear = build_call_expr (set_fpsr, 1, masked_fenv_sr);

  /* Generate the equivalent of:
	unsigned int new_fenv_var;
	new_fenv_var = __builtin_aarch64_get_fpsr ();

	__builtin_aarch64_set_fpsr (fenv_sr);

	__atomic_feraiseexcept (new_fenv_var);  */

  new_fenv_var = create_tmp_var (unsigned_type_node, NULL);
  reload_fenv = build2 (MODIFY_EXPR, unsigned_type_node,
			new_fenv_var, build_call_expr (get_fpsr, 0));
  restore_fnenv = build_call_expr (set_fpsr, 1, fenv_sr);
  atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  update_call = build_call_expr (atomic_feraiseexcept, 1,
				 fold_convert (integer_type_node, new_fenv_var));
  *update = build2 (COMPOUND_EXPR, void_type_node,
		    build2 (COMPOUND_EXPR, void_type_node,
			    reload_fenv, restore_fnenv), update_call);
}
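/* The hold/clear/update triple mirrors what feholdexcept/feupdateenv do at
   the C library level: save and silence the FP environment, run the body,
   then restore the environment and re-raise whatever flags the body set.
   A user-level analogue of the sequence (illustrative, not the hook's
   actual output):  */

#include <fenv.h>

int fenv_demo(void)
{
  fenv_t saved;
  feholdexcept(&saved);          /* 'hold': save env, clear flags, mask traps */
  volatile double x = 1.0, y = 0.0;
  volatile double r = x / y;     /* body: raises FE_DIVBYZERO */
  (void) r;
  int raised = fetestexcept(FE_ALL_EXCEPT);
  feupdateenv(&saved);           /* 'update': restore env, re-raise flags */
  return raised != 0;
}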
static bool
abs_replacement (basic_block cond_bb, basic_block middle_bb,
		 edge e0 ATTRIBUTE_UNUSED, edge e1,
		 gimple phi, tree arg0, tree arg1)
{
  tree result;
  gimple new_stmt, cond;
  gimple_stmt_iterator gsi;
  edge true_edge, false_edge;
  gimple assign;
  edge e;
  tree rhs, lhs;
  bool negate;
  enum tree_code cond_code;

  /* If the type says honor signed zeros we cannot do this
     optimization.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
    return false;

  /* OTHER_BLOCK must have only one executable statement which must have the
     form arg0 = -arg1 or arg1 = -arg0.  */

  assign = last_and_only_stmt (middle_bb);
  /* If we did not find the proper negation assignment, then we cannot
     optimize.  */
  if (assign == NULL)
    return false;

  /* If we got here, then we have found the only executable statement
     in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
     arg1 = -arg0, then we cannot optimize.  */
  if (gimple_code (assign) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (assign);

  if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
    return false;

  rhs = gimple_assign_rhs1 (assign);

  /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
  if (!(lhs == arg0 && rhs == arg1)
      && !(lhs == arg1 && rhs == arg0))
    return false;

  cond = last_stmt (cond_bb);
  result = PHI_RESULT (phi);

  /* Only relationals comparing arg[01] against zero are interesting.  */
  cond_code = gimple_cond_code (cond);
  if (cond_code != GT_EXPR && cond_code != GE_EXPR
      && cond_code != LT_EXPR && cond_code != LE_EXPR)
    return false;

  /* Make sure the conditional is arg[01] OP y.  */
  if (gimple_cond_lhs (cond) != rhs)
    return false;

  if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
      ? real_zerop (gimple_cond_rhs (cond))
      : integer_zerop (gimple_cond_rhs (cond)))
    ;
  else
    return false;

  /* We need to know which is the true edge and which is the false
     edge so that we know if we have abs or negative abs.  */
  extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);

  /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
     will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
     the false edge goes to OTHER_BLOCK.  */
  if (cond_code == GT_EXPR || cond_code == GE_EXPR)
    e = true_edge;
  else
    e = false_edge;

  if (e->dest == middle_bb)
    negate = true;
  else
    negate = false;

  result = duplicate_ssa_name (result, NULL);

  if (negate)
    {
      tree tmp = create_tmp_var (TREE_TYPE (result), NULL);
      add_referenced_var (tmp);
      lhs = make_ssa_name (tmp, NULL);
    }
  else
    lhs = result;

  /* Build the modify expression with abs expression.  */
  new_stmt = gimple_build_assign_with_ops (ABS_EXPR, lhs, rhs, NULL);

  gsi = gsi_last_bb (cond_bb);
  gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);

  if (negate)
    {
      /* Get the right GSI.  We want to insert after the recently
	 added ABS_EXPR statement (which we know is the first statement
	 in the block).  */
      new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, result, lhs, NULL);

      gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
    }

  replace_phi_edge_with_variable (cond_bb, e1, phi, result);

  /* Note that we optimized this PHI.  */
  return true;
}
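/* The CFG shape this function matches comes from source like the function
   below: cond_bb holds the comparison against zero, middle_bb holds the
   single negation, and the PHI merging r is what gets replaced by an
   ABS_EXPR (negated afterwards for the GT/GE forms).  Illustrative only:  */

long abs_pattern_demo(long v)
{
  long r = v;    /* arg0 */
  if (v < 0)     /* cond_bb: LT_EXPR against integer zero */
    r = -v;      /* middle_bb: the lone NEGATE_EXPR assignment */
  return r;      /* PHI (v, -v) -> folded to ABS_EXPR <v> */
}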
static void gen_conditions_for_pow_int_base (tree base, tree expn, vec<gimple> conds, unsigned *nconds) { gimple base_def; tree base_val0; tree int_type; tree temp, tempn; tree cst0; gimple stmt1, stmt2; int bit_sz, max_exp; inp_domain exp_domain; base_def = SSA_NAME_DEF_STMT (base); base_val0 = gimple_assign_rhs1 (base_def); int_type = TREE_TYPE (base_val0); bit_sz = TYPE_PRECISION (int_type); gcc_assert (bit_sz > 0 && bit_sz <= MAX_BASE_INT_BIT_SIZE); /* Determine the max exp argument value according to the size of the base integer. The max exp value is conservatively estimated assuming IEEE754 double precision format. */ if (bit_sz == 8) max_exp = 128; else if (bit_sz == 16) max_exp = 64; else { gcc_assert (bit_sz == MAX_BASE_INT_BIT_SIZE); max_exp = 32; } /* For pow ((double)x, y), generate the following conditions: cond 1: temp1 = x; if (temp1 <= 0) cond 2: temp2 = y; if (temp2 > max_exp_real_cst) */ /* Generate condition in reverse order -- first the condition for the exp argument. */ exp_domain = get_domain (0, false, false, max_exp, true, true); gen_conditions_for_domain (expn, exp_domain, conds, nconds); /* Now generate condition for the base argument. Note it does not use the helper function gen_conditions_for_domain because the base type is integer. */ /* Push a separator. */ conds.quick_push (NULL); temp = create_tmp_var (int_type, "DCE_COND1"); cst0 = build_int_cst (int_type, 0); stmt1 = gimple_build_assign (temp, base_val0); tempn = make_ssa_name (temp, stmt1); gimple_assign_set_lhs (stmt1, tempn); stmt2 = gimple_build_cond (LE_EXPR, tempn, cst0, NULL_TREE, NULL_TREE); conds.quick_push (stmt1); conds.quick_push (stmt2); (*nconds)++; }
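/* These conditions let the pass shrink-wrap a pow call whose result is
   dead: the call must survive only on inputs that may set errno or raise
   an exception flag.  A sketch of the effect, assuming an 8-bit integer
   base (so max_exp is 128); the function and parameter names are
   illustrative:  */

#include <math.h>

void shrink_wrapped_pow(signed char x, double y)
{
  /* before: pow((double) x, y); with the result unused.
     after: the call is guarded by DCE_COND1 (x <= 0) and the
     exp-domain check (y > 128).  */
  if (x <= 0 || y > 128.0)
    (void) pow((double) x, y);
}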
static void insert_call_to_junk_fn(gimple stmt)
{
    tree tv, rv, fn, rhs, tmp;
    gimple gimp;
    gimple_stmt_iterator gsi;
    static bool has_initted;
    static tree decl, proto, decl_get_funcs, proto_get_funcs, fn_ptr_type;

    printf("slimer: Inserting junk function call at line: %d\n",
           gimple_lineno(stmt));

    /* Get a random value modulo n_funcs for the index into the runtime
     * __funcs array of junk functions:
     *   rv = time % n_funcs;
     *   fn = __funcs + rv;
     *   call fn
     */

    /* Build instances */
    if (!has_initted)
    {
        proto = build_function_type_list(
            uint64_type_node, ptr_type_node, NULL_TREE);
        decl = build_fn_decl("time", proto);
        DECL_EXTERNAL(decl) = 1;

        proto_get_funcs = build_function_type_list(ptr_type_node, NULL_TREE);
        decl_get_funcs = build_fn_decl("__slimer_get_funcs", proto_get_funcs);

        fn_ptr_type = build_function_type_list(
            void_type_node, void_type_node, NULL_TREE);

        has_initted = true;
    }

    /* time_tmp = time(NULL); */
    tv = create_tmp_var(uint64_type_node, "time_tmp");
    tv = make_ssa_name(tv, NULL);
    gimp = gimple_build_call(decl, 1, null_pointer_node);
    gimple_set_lhs(gimp, tv);
    gsi = gsi_for_stmt(stmt);
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

    /* rv_tmp = time_tmp % n_funcs */
    rv = create_tmp_var(uint64_type_node, "rv_tmp");
    rv = make_ssa_name(rv, NULL);
    rhs = build_int_cst(integer_type_node, n_funcs);
    gimp = gimple_build_assign_with_ops(TRUNC_MOD_EXPR, rv, tv, rhs);
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

    /* tmp = __slimer_get_funcs();  TODO: Get rid of __slimer_get_funcs()
     * rv = rv * sizeof(void *)
     * fn_tmp = tmp + rv
     */
    tree pp_type = build_pointer_type(ptr_type_node);
    tmp = create_tmp_var(pp_type, "tmp");
    tmp = make_ssa_name(tmp, NULL);
    gimp = gimple_build_call(decl_get_funcs, 0);
    gimple_set_lhs(gimp, tmp);
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

    /* rv = rv * sizeof(void *)
     * FIXME: THIS IS NOT SUFFICIENT FOR CROSS-COMPILING TO ARCHITECTURES
     * WHOSE ADDRESS SIZE DIFFERS FROM THE HOST'S sizeof(void *)
     */
    tree addr_size = build_int_cst(integer_type_node, sizeof(void *));
    gimp = gimple_build_assign_with_ops(MULT_EXPR, rv, rv, addr_size);
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

    /* fn_tmp = tmp + rv */
    fn = create_tmp_var(pp_type, "fn_tmp");
    fn = make_ssa_name(fn, NULL);
    gimp = gimple_build_assign_with_ops(PLUS_EXPR, fn, tmp, rv);
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

    /* the_fn = *fn */
    tree f = build_pointer_type(fn_ptr_type);
    tree the_fn = create_tmp_var(f, "the_func_ptr");
    the_fn = make_ssa_name(the_fn, NULL);
    gimp = gimple_build_assign(the_fn, build_simple_mem_ref(fn));
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

    /* call the_fn */
    gimp = gimple_build_call(the_fn, 0);
    gsi_insert_before(&gsi, gimp, GSI_SAME_STMT);

#ifdef GOAT_DEBUG
    debug_function(cfun->decl, 0);
#endif
}
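/* A pass would invoke insert_call_to_junk_fn on selected statements.  A
   hypothetical driver, written against the same pre-4.9 GCC API this
   plugin uses (the 5% sampling rate and the pass scaffolding are
   illustrative, not slimer's actual registration code):

       static unsigned int junk_pass_execute(void)
       {
           basic_block bb;
           gimple_stmt_iterator gsi;

           FOR_EACH_BB(bb)
               for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi))
                   if (rand() % 100 < 5)   // instrument ~5% of statements
                       insert_call_to_junk_fn(gsi_stmt(gsi));
           return 0;
       }
*/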