static void
instrument_mem_ref (tree t, gimple_stmt_iterator *iter, bool is_lhs)
{
  enum ubsan_null_ckind ikind = is_lhs ? UBSAN_STORE_OF : UBSAN_LOAD_OF;
  if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (TREE_TYPE (t))))
    ikind = UBSAN_MEMBER_ACCESS;
  tree kind = build_int_cst (unsigned_char_type_node, ikind);
  gimple g = gimple_build_call_internal (IFN_UBSAN_NULL, 2, t, kind);
  gimple_set_location (g, gimple_location (gsi_stmt (*iter)));
  gsi_insert_before (iter, g, GSI_SAME_STMT);
}
void
gsi_move_before (gimple_stmt_iterator *from, gimple_stmt_iterator *to)
{
  gimple stmt = gsi_stmt (*from);
  gsi_remove (from, false);

  /* For consistency with gsi_move_after, it might be better to have
     GSI_NEW_STMT here; however, that breaks several places that expect
     that TO does not change.  */
  gsi_insert_before (to, stmt, GSI_SAME_STMT);
}
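/* The comment above turns on the difference between the two iterator
   modes used throughout these examples.  A minimal sketch, not from the
   surrounding sources: with GSI_SAME_STMT the iterator keeps pointing at
   the statement it pointed at before the insertion, while GSI_NEW_STMT
   moves it to the newly inserted statement.  MARK_FN is a hypothetical
   function decl used only for illustration.  */

static void
insert_two_calls_sketch (gimple_stmt_iterator *gsi, tree mark_fn)
{
  gimple g1 = gimple_build_call (mark_fn, 0);
  gimple g2 = gimple_build_call (mark_fn, 0);

  /* GSI still points at the original statement afterwards, ...  */
  gsi_insert_before (gsi, g1, GSI_SAME_STMT);
  /* ... so G2 lands between G1 and that statement.  Had either call
     used GSI_NEW_STMT, GSI would now point at the inserted statement
     instead.  */
  gsi_insert_before (gsi, g2, GSI_SAME_STMT);
}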
void
gimple_gen_ic_func_profiler (void)
{
  struct cgraph_node *c_node = cgraph_node::get (current_function_decl);
  gimple_stmt_iterator gsi;
  gcall *stmt1;
  gassign *stmt2;
  tree tree_uid, cur_func, void0;

  if (c_node->only_called_directly_p ())
    return;

  gimple_init_edge_profiler ();

  /* Insert code:

       stmt1: __gcov_indirect_call_profiler_v2 (profile_id,
                                                &current_function_decl)
   */
  gsi = gsi_after_labels (split_edge (single_succ_edge
                                      (ENTRY_BLOCK_PTR_FOR_FN (cfun))));

  cur_func = force_gimple_operand_gsi (&gsi,
                                       build_addr (current_function_decl,
                                                   current_function_decl),
                                       true, NULL_TREE,
                                       true, GSI_SAME_STMT);
  tree_uid = build_int_cst
               (gcov_type_node,
                cgraph_node::get (current_function_decl)->profile_id);
  stmt1 = gimple_build_call (tree_indirect_call_profiler_fn, 2,
                             tree_uid, cur_func);
  gsi_insert_before (&gsi, stmt1, GSI_SAME_STMT);

  /* Set __gcov_indirect_call_callee to 0,
     so that calls from other modules won't get misattributed
     to the last caller of the current callee.  */
  void0 = build_int_cst (build_pointer_type (void_type_node), 0);
  stmt2 = gimple_build_assign (ic_void_ptr_var, void0);
  gsi_insert_before (&gsi, stmt2, GSI_SAME_STMT);
}
static bool
remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* If statement is known to be undefined after peeling, turn it
         into unreachable (or trap when debugging experience is supposed
         to be good).  */
      if (!elt->is_exit
          && wi::ltu_p (elt->bound, npeeled))
        {
          gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
          gcall *stmt = gimple_build_call
              (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
          gimple_set_location (stmt, gimple_location (elt->stmt));
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
          split_block (gimple_bb (stmt), stmt);
          changed = true;
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Forced statement unreachable: ");
              print_gimple_stmt (dump_file, elt->stmt, 0, 0);
            }
        }
      /* If we know the exit will be taken after peeling, update.  */
      else if (elt->is_exit
               && wi::leu_p (elt->bound, npeeled))
        {
          basic_block bb = gimple_bb (elt->stmt);
          edge exit_edge = EDGE_SUCC (bb, 0);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Forced exit to be taken: ");
              print_gimple_stmt (dump_file, elt->stmt, 0, 0);
            }
          if (!loop_exit_edge_p (loop, exit_edge))
            exit_edge = EDGE_SUCC (bb, 1);
          gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
          gcond *cond_stmt = as_a <gcond *> (elt->stmt);
          if (exit_edge->flags & EDGE_TRUE_VALUE)
            gimple_cond_make_true (cond_stmt);
          else
            gimple_cond_make_false (cond_stmt);
          update_stmt (cond_stmt);
          changed = true;
        }
    }
  return changed;
}
void
gimple_gen_time_profiler (unsigned tag, unsigned base,
                          gimple_stmt_iterator &gsi)
{
  tree ref_ptr = tree_coverage_counter_addr (tag, base);
  gcall *call;

  ref_ptr = force_gimple_operand_gsi (&gsi, ref_ptr,
                                      true, NULL_TREE, true, GSI_SAME_STMT);
  call = gimple_build_call (tree_time_profiler_fn, 1, ref_ptr);
  gsi_insert_before (&gsi, call, GSI_NEW_STMT);
}
static void
insert_trap_and_remove_trailing_statements (gimple_stmt_iterator *si_p,
                                            tree op)
{
  /* We want the NULL pointer dereference to actually occur so that
     code that wishes to catch the signal can do so.

     If the dereference is a load, then there's nothing to do as the
     LHS will be a throw-away SSA_NAME and the RHS is the NULL
     dereference.

     If the dereference is a store and we can easily transform the RHS,
     then simplify the RHS to enable more DCE.  Note that we require the
     statement to be a GIMPLE_ASSIGN which filters out calls on the RHS.  */
  gimple stmt = gsi_stmt (*si_p);
  if (walk_stmt_load_store_ops (stmt, (void *)op, NULL, check_loadstore)
      && is_gimple_assign (stmt)
      && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt))))
    {
      /* We just need to turn the RHS into zero converted to the proper
         type.  */
      tree type = TREE_TYPE (gimple_assign_lhs (stmt));
      gimple_assign_set_rhs_code (stmt, INTEGER_CST);
      gimple_assign_set_rhs1 (stmt, fold_convert (type, integer_zero_node));
      update_stmt (stmt);
    }

  gimple new_stmt
    = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  gimple_seq seq = NULL;
  gimple_seq_add_stmt (&seq, new_stmt);

  /* If we had a NULL pointer dereference, then we want to insert the
     __builtin_trap after the statement, for the other cases we want
     to insert before the statement.  */
  if (walk_stmt_load_store_ops (stmt, (void *)op,
                                check_loadstore, check_loadstore))
    gsi_insert_after (si_p, seq, GSI_NEW_STMT);
  else
    gsi_insert_before (si_p, seq, GSI_NEW_STMT);

  /* We must remove statements from the end of the block so that we
     never reference a released SSA_NAME.  */
  basic_block bb = gimple_bb (gsi_stmt (*si_p));
  for (gimple_stmt_iterator si = gsi_last_bb (bb);
       gsi_stmt (si) != gsi_stmt (*si_p);
       si = gsi_last_bb (bb))
    {
      stmt = gsi_stmt (si);
      unlink_stmt_vdef (stmt);
      gsi_remove (&si, true);
      release_defs (stmt);
    }
}
void
gimple_gen_ic_func_profiler (void)
{
  struct cgraph_node *c_node = cgraph_get_node (current_function_decl);
  gimple_stmt_iterator gsi;
  gimple stmt1, stmt2;
  tree tree_uid, cur_func, counter_ptr, ptr_var, void0;

  if (cgraph_only_called_directly_p (c_node))
    return;

  gimple_init_edge_profiler ();

  gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR));

  cur_func = force_gimple_operand_gsi (&gsi,
                                       build_addr (current_function_decl,
                                                   current_function_decl),
                                       true, NULL_TREE,
                                       true, GSI_SAME_STMT);
  counter_ptr = force_gimple_operand_gsi (&gsi, ic_gcov_type_ptr_var,
                                          true, NULL_TREE, true,
                                          GSI_SAME_STMT);
  add_referenced_var (ic_gcov_type_ptr_var);
  ptr_var = force_gimple_operand_gsi (&gsi, ic_void_ptr_var,
                                      true, NULL_TREE, true,
                                      GSI_SAME_STMT);
  add_referenced_var (ic_void_ptr_var);
  tree_uid = build_int_cst (gcov_type_node, current_function_funcdef_no);
  stmt1 = gimple_build_call (tree_indirect_call_profiler_fn, 4,
                             counter_ptr, tree_uid, cur_func, ptr_var);
  gsi_insert_before (&gsi, stmt1, GSI_SAME_STMT);

  /* Set __gcov_indirect_call_callee to 0,
     so that calls from other modules won't get misattributed
     to the last caller of the current callee.  */
  void0 = build_int_cst (build_pointer_type (void_type_node), 0);
  stmt2 = gimple_build_assign (ic_void_ptr_var, void0);
  gsi_insert_before (&gsi, stmt2, GSI_SAME_STMT);
}
static void insert_call(gimple stmt, tree decl)
{
    gimple call;
    gimple_stmt_iterator gsi;

    /* Call the function */
    call = gimple_build_call(decl, 0);
    gsi = gsi_for_stmt(stmt);
    gsi_insert_before(&gsi, call, GSI_NEW_STMT);

    /* So we don't process this bad-boy */
    VEC_safe_push(tree, gc, analyized_fns, decl);
}
static void perturb_local_entropy(basic_block bb, tree local_entropy)
{
    gimple_stmt_iterator gsi;
    gimple assign;
    tree addxorrol, rhs;
    enum tree_code op;

    op = get_op(&rhs);
    addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op,
                                unsigned_intDI_type_node,
                                local_entropy, rhs);
    assign = gimple_build_assign(local_entropy, addxorrol);
    gsi = gsi_after_labels(bb);
    gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
    update_stmt(assign);
//  debug_bb(bb);
}
void
gimple_gen_ior_profiler (histogram_value value, unsigned tag, unsigned base)
{
  gimple stmt = value->hvalue.stmt;
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  tree ref_ptr = tree_coverage_counter_addr (tag, base);
  gcall *call;
  tree val;

  ref_ptr = force_gimple_operand_gsi (&gsi, ref_ptr,
                                      true, NULL_TREE, true, GSI_SAME_STMT);
  val = prepare_instrumented_value (&gsi, value);
  call = gimple_build_call (tree_ior_profiler_fn, 2, ref_ptr, val);
  gsi_insert_before (&gsi, call, GSI_NEW_STMT);
}
// create the function call to a `record_assignment' function and insert it
// before the given statement
static void insert_instrumentation_fn(gimple curr_stmt, tree var_id,
                                      tree new_value)
{
    // build function declaration
    tree proto = build_function_type_list(
            void_type_node,       /* return type */
            integer_type_node,    /* first arg's type */
            const_ptr_type_node,  /* second arg's type */
            NULL_TREE);
    tree decl = build_fn_decl("record_assignment", proto);

    // build the function call with the new value tree and the variable id
    // tree and insert it before the statement that was passed as the first
    // argument
    gimple call = gimple_build_call(decl, 2, var_id, new_value);
    gimple_stmt_iterator gsi = gsi_for_stmt(curr_stmt);
    gsi_insert_before(&gsi, call, GSI_NEW_STMT);
}
static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
{
    gimple asm_or_stmt, call_stmt;
    tree old_fptr, new_fptr, input, output;
#if BUILDING_GCC_VERSION <= 4007
    VEC(tree, gc) *inputs = NULL;
    VEC(tree, gc) *outputs = NULL;
#else
    vec<tree, va_gc> *inputs = NULL;
    vec<tree, va_gc> *outputs = NULL;
#endif

    call_stmt = gsi_stmt(*gsi);
    old_fptr = gimple_call_fn(call_stmt);

    // create temporary fptr variable
    new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
#if BUILDING_GCC_VERSION <= 4007
    add_referenced_var(new_fptr);
#endif
    new_fptr = make_ssa_name(new_fptr, NULL);

    // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
    input = build_tree_list(NULL_TREE, build_string(2, "0"));
    input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
    output = build_tree_list(NULL_TREE, build_string(3, "=r"));
    output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
#if BUILDING_GCC_VERSION <= 4007
    VEC_safe_push(tree, gc, inputs, input);
    VEC_safe_push(tree, gc, outputs, output);
#else
    vec_safe_push(inputs, input);
    vec_safe_push(outputs, output);
#endif
    asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t",
                                       inputs, outputs, NULL, NULL);
    SSA_NAME_DEF_STMT(new_fptr) = asm_or_stmt;
    gimple_asm_set_volatile(asm_or_stmt, true);
    gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
    update_stmt(asm_or_stmt);

    // replace call stmt fn with the new fptr
    gimple_call_set_fn(call_stmt, new_fptr);
    update_stmt(call_stmt);
}
/* Insert COND at GSIP, splitting the enclosing block: E_TRUE becomes the
   true edge of the new condition and the remainder of the block is
   reached on the false edge.  Returns the block holding the statements
   after the split point.  */

static basic_block
hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
                               tree cond, edge e_true,
                               bool update_dominators)
{
  tree tmp;
  gcond *cond_stmt;
  edge e_false;
  basic_block new_bb, split_bb = gsi_bb (*gsip);
  bool dominated_e_true = false;

  gcc_assert (e_true->src == split_bb);

  if (update_dominators
      && get_immediate_dominator (CDI_DOMINATORS, e_true->dest) == split_bb)
    dominated_e_true = true;

  tmp = force_gimple_operand_gsi (gsip, cond, /*simple=*/true, NULL,
                                  /*before=*/true, GSI_SAME_STMT);
  cond_stmt = gimple_build_cond_from_tree (tmp, NULL_TREE, NULL_TREE);
  gsi_insert_before (gsip, cond_stmt, GSI_SAME_STMT);

  e_false = split_block (split_bb, cond_stmt);
  new_bb = e_false->dest;
  redirect_edge_pred (e_true, split_bb);

  e_true->flags &= ~EDGE_FALLTHRU;
  e_true->flags |= EDGE_TRUE_VALUE;

  e_false->flags &= ~EDGE_FALLTHRU;
  e_false->flags |= EDGE_FALSE_VALUE;
  e_false->probability = REG_BR_PROB_BASE - e_true->probability;
  e_false->count = split_bb->count - e_true->count;

  new_bb->count = e_false->count;

  if (update_dominators)
    {
      if (dominated_e_true)
        set_immediate_dominator (CDI_DOMINATORS, e_true->dest, split_bb);
      set_immediate_dominator (CDI_DOMINATORS, e_false->dest, split_bb);
    }

  return new_bb;
}
basic_block
gsi_insert_on_edge_immediate (edge e, gimple *stmt)
{
  gimple_stmt_iterator gsi;
  basic_block new_bb = NULL;
  bool ins_after;

  gcc_assert (!PENDING_STMT (e));

  ins_after = gimple_find_edge_insert_loc (e, &gsi, &new_bb);

  update_call_edge_frequencies (stmt, gsi.bb);

  if (ins_after)
    gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
  else
    gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);

  return new_bb;
}
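/* A minimal sketch, not from the sources above, of the deferred
   counterpart to the immediate variant: gsi_insert_on_edge only queues
   the statement on the edge, and nothing is materialized until
   gsi_commit_edge_inserts walks all edges and splits where needed.
   FNDECL is a hypothetical function decl used only for illustration.  */

static void
queue_calls_on_edges_sketch (tree fndecl)
{
  edge e;
  edge_iterator ei;

  /* Queue one call on every edge into the function's exit block.  */
  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    gsi_insert_on_edge (e, gimple_build_call (fndecl, 0));

  /* One pass at the end performs all pending insertions.  */
  gsi_commit_edge_inserts ();
}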
static gimple
gen_def_assigns (gimple_stmt_iterator *gsi)
{
  int i;
  gimple assign = NULL;

  for (i = 0; i < info.phi_count; i++)
    {
      tree name
        = make_ssa_name (SSA_NAME_VAR (info.target_inbound_names[i]), NULL);

      info.target_outbound_names[i] = name;
      assign = gimple_build_assign (name, info.default_values[i]);
      SSA_NAME_DEF_STMT (name) = assign;
      gsi_insert_before (gsi, assign, GSI_SAME_STMT);
      find_new_referenced_vars (assign);
      mark_symbols_for_renaming (assign);
    }

  return assign;
}
static void
lower_gimple_return (gimple_stmt_iterator *gsi, struct lower_data *data)
{
  gimple stmt = gsi_stmt (*gsi);
  gimple t;
  int i;
  return_statements_t tmp_rs;

  /* Match this up with an existing return statement that's been created.  */
  for (i = data->return_statements.length () - 1; i >= 0; i--)
    {
      tmp_rs = data->return_statements[i];

      if (gimple_return_retval (stmt) == gimple_return_retval (tmp_rs.stmt))
        {
          /* Remove the line number from the representative return statement.
             It now fills in for many such returns.  Failure to remove this
             will result in incorrect results for coverage analysis.  */
          gimple_set_location (tmp_rs.stmt, UNKNOWN_LOCATION);

          goto found;
        }
    }

  /* Not found.  Create a new label and record the return statement.  */
  tmp_rs.label = create_artificial_label (cfun->function_end_locus);
  tmp_rs.stmt = stmt;
  data->return_statements.safe_push (tmp_rs);

  /* Generate a goto statement and remove the return statement.  */
 found:
  /* When not optimizing, make sure user returns are preserved.  */
  if (!optimize && gimple_has_location (stmt))
    DECL_ARTIFICIAL (tmp_rs.label) = 0;
  t = gimple_build_goto (tmp_rs.label);
  gimple_set_location (t, gimple_location (stmt));
  gimple_set_block (t, gimple_block (stmt));
  gsi_insert_before (gsi, t, GSI_SAME_STMT);
  gsi_remove (gsi, false);
}
static void
build_one_array (gimple swtch, int num, tree arr_index_type, gimple phi,
                 tree tidx)
{
  tree array_type, ctor, decl, value_type, name, fetch;
  gimple load;
  gimple_stmt_iterator gsi;

  gcc_assert (info.default_values[num]);
  value_type = TREE_TYPE (info.default_values[num]);
  array_type = build_array_type (value_type, arr_index_type);

  ctor = build_constructor (array_type, info.constructors[num]);
  TREE_CONSTANT (ctor) = true;

  decl = build_decl (VAR_DECL, NULL_TREE, array_type);
  TREE_STATIC (decl) = 1;
  DECL_INITIAL (decl) = ctor;

  DECL_NAME (decl) = create_tmp_var_name ("CSWTCH");
  DECL_ARTIFICIAL (decl) = 1;
  TREE_CONSTANT (decl) = 1;
  add_referenced_var (decl);
  varpool_mark_needed_node (varpool_node (decl));
  varpool_finalize_decl (decl);
  mark_sym_for_renaming (decl);

  name = make_ssa_name (SSA_NAME_VAR (PHI_RESULT (phi)), NULL);
  info.target_inbound_names[num] = name;

  fetch = build4 (ARRAY_REF, value_type, decl, tidx, NULL_TREE, NULL_TREE);
  load = gimple_build_assign (name, fetch);
  SSA_NAME_DEF_STMT (name) = load;

  gsi = gsi_for_stmt (swtch);
  gsi_insert_before (&gsi, load, GSI_SAME_STMT);
  mark_symbols_for_renaming (load);

  info.arr_ref_last = load;
}
void
gimple_gen_interval_profiler (histogram_value value, unsigned tag,
                              unsigned base)
{
  gimple stmt = value->hvalue.stmt;
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  tree ref = tree_coverage_counter_ref (tag, base), ref_ptr;
  gcall *call;
  tree val;
  tree start = build_int_cst_type (integer_type_node,
                                   value->hdata.intvl.int_start);
  tree steps = build_int_cst_type (unsigned_type_node,
                                   value->hdata.intvl.steps);

  ref_ptr = force_gimple_operand_gsi (&gsi,
                                      build_addr (ref, current_function_decl),
                                      true, NULL_TREE, true, GSI_SAME_STMT);
  val = prepare_instrumented_value (&gsi, value);
  call = gimple_build_call (tree_interval_profiler_fn, 4,
                            ref_ptr, val, start, steps);
  gsi_insert_before (&gsi, call, GSI_NEW_STMT);
}
static void
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor,
                    unsigned ahead)
{
  HOST_WIDE_INT delta;
  tree addr, addr_base, write_p, local;
  gimple prefetch;
  gimple_stmt_iterator bsi;
  unsigned n_prefetches, ap;
  bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Issued%s prefetch for %p.\n",
             nontemporal ? " nontemporal" : "",
             (void *) ref);

  bsi = gsi_for_stmt (ref->stmt);

  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
                  / ref->prefetch_mod);
  addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
  addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
                                        true, NULL, true, GSI_SAME_STMT);
  write_p = ref->write_p ? integer_one_node : integer_zero_node;
  local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);

  for (ap = 0; ap < n_prefetches; ap++)
    {
      /* Determine the address to prefetch.  */
      delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
      addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
                          addr_base, size_int (delta));
      addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
                                       true, GSI_SAME_STMT);

      /* Create the prefetch instruction.  */
      prefetch = gimple_build_call (built_in_decls[BUILT_IN_PREFETCH],
                                    3, addr, write_p, local);
      gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
    }
}
static void handle_task_call (gimple_stmt_iterator gsi,
                              const tracked_func_t *tf)
{
  /* iterator points to the call instruction */

  /* . generate the following statement list:
     generate type T;
     declare T* var;
     var = call __xkaapi_pushdata(sizeof(T));
     foreach args, var->member = local_value;
     call __xkaapi_pushtask();
     call __xkaapi_barrier();
   */

  tree dummy_adapter;
  tree sp;
  tree thread;
  gimple_seq stmts;
  gimple call;

  init_decls();

  /* kaapi_pushdata_aligned */
  gen_alloca_stmts(&stmts, &sp);
  gsi_insert_seq_before(&gsi, stmts, GSI_SAME_STMT);
  /* fixme: gimple_seq_free(alloca_stmts); */

  /* generate the adapter routine */
  /* dummy_adapter = create_dummy_adapter(); */

  /* kaapi_pushtask(self_thread, sp); */
  thread = build_int_cst(ptr_type_node, 0xdeadc0c0);
  call = gimple_build_call(kaapi_pushtask_decl, 2, sp, thread);
  gsi_insert_before(&gsi, call, GSI_SAME_STMT);

  /* kaapi_barrier(); */
  call = gimple_build_call(kaapi_barrier_decl, 0);
  gsi_replace(&gsi, call, true);
}
static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
{
    gimple stmt;
    gcall *check_alloca;
    tree alloca_size;
    cgraph_node_ptr node;
    int frequency;
    basic_block bb;

    // insert call to void pax_check_alloca(unsigned long size)
    alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
    stmt = gimple_build_call(check_function_decl, 1, alloca_size);
    check_alloca = as_a_gcall(stmt);
    gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);

    // update the cgraph
    bb = gimple_bb(check_alloca);
    node = cgraph_get_create_node(check_function_decl);
    gcc_assert(node);
    frequency = compute_call_stmt_bb_frequency(current_function_decl, bb);
    cgraph_create_edge(cgraph_get_node(current_function_decl), node,
                       check_alloca, bb->count, frequency, bb->loop_depth);
}
static unsigned int sancov_execute(void)
{
    basic_block bb;

    /* Remove this line when this plugin and kcov will be in the kernel.
    if (!strcmp(DECL_NAME_POINTER(current_function_decl), DECL_NAME_POINTER(sancov_fndecl)))
        return 0;
    */

    FOR_EACH_BB_FN(bb, cfun) {
        const_gimple stmt;
        gcall *gcall;
        gimple_stmt_iterator gsi = gsi_after_labels(bb);

        if (gsi_end_p(gsi))
            continue;

        stmt = gsi_stmt(gsi);
        gcall = as_a_gcall(gimple_build_call(sancov_fndecl, 0));
        gimple_set_location(gcall, gimple_location(stmt));
        gsi_insert_before(&gsi, gcall, GSI_SAME_STMT);
    }

    /* The snippet was truncated here; an execute hook returns its TODO
       flags, so returning 0 is the assumed ending.  */
    return 0;
}
basic_block
gsi_insert_on_edge_immediate (edge e, gimple stmt)
{
  gimple_stmt_iterator gsi;
  struct gimple_seq_node_d node;
  basic_block new_bb = NULL;
  bool ins_after;

  gcc_assert (!PENDING_STMT (e));

  ins_after = gimple_find_edge_insert_loc (e, &gsi, &new_bb);

  node.stmt = stmt;
  node.prev = node.next = NULL;
  update_call_edge_frequencies (&node, gsi.bb);

  if (ins_after)
    gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
  else
    gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);

  return new_bb;
}
/* Insert a call to the runtime function "__slimer_add_fn" which will add the
 * "junk" function created at compile-time to an array at runtime */
static void insert_add_fn(gimple stmt, int index)
{
    tree fn;
    gimple call;
    gimple_stmt_iterator gsi;
    static tree decl, proto, idx;

    if (!decl || !proto)
    {
        proto = build_function_type_list(void_type_node, ptr_type_node,
                                         integer_type_node, NULL_TREE);
        decl = build_fn_decl("__slimer_add_fn", proto);

        /* Add this fndecl to our list of things we do not process */
        VEC_safe_push(tree, gc, analyized_fns, decl);
    }

    /* Create a constant value and pointer to the function we are to add */
    idx = build_int_cst(integer_type_node, index);
    fn = build_addr(VEC_index(tree, fakes, index), NULL_TREE);

    call = gimple_build_call(decl, 2, fn, idx);
    gsi = gsi_for_stmt(stmt);
    gsi_insert_before(&gsi, call, GSI_NEW_STMT);
}
static void
vect_pattern_recog_1 (gimple (* vect_recog_func) (gimple, tree *, tree *),
                      gimple_stmt_iterator si)
{
  gimple stmt = gsi_stmt (si), pattern_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info pattern_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree pattern_vectype;
  tree type_in, type_out;
  enum tree_code code;
  int i;
  gimple next;

  pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out);
  if (!pattern_stmt)
    return;

  if (VECTOR_MODE_P (TYPE_MODE (type_in)))
    {
      /* No need to check target support (already checked by the pattern
         recognition function).  */
      if (type_out)
        gcc_assert (VECTOR_MODE_P (TYPE_MODE (type_out)));
      pattern_vectype = type_out ? type_out : type_in;
    }
  else
    {
      enum machine_mode vec_mode;
      enum insn_code icode;
      optab optab;

      /* Check target support  */
      type_in = get_vectype_for_scalar_type (type_in);
      if (!type_in)
        return;
      if (type_out)
        type_out = get_vectype_for_scalar_type (type_out);
      else
        type_out = type_in;
      if (!type_out)
        return;
      pattern_vectype = type_out;

      if (is_gimple_assign (pattern_stmt))
        code = gimple_assign_rhs_code (pattern_stmt);
      else
        {
          gcc_assert (is_gimple_call (pattern_stmt));
          code = CALL_EXPR;
        }

      optab = optab_for_tree_code (code, type_in, optab_default);
      vec_mode = TYPE_MODE (type_in);
      if (!optab
          || (icode = optab_handler (optab, vec_mode)) == CODE_FOR_nothing
          || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out)))
        return;
    }

  /* Found a vectorizable pattern.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "pattern recognized: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* Mark the stmts that are involved in the pattern.  */
  gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (pattern_stmt,
                      new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL));
  pattern_stmt_info = vinfo_for_stmt (pattern_stmt);

  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info);
  STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
  STMT_VINFO_IN_PATTERN_P (stmt_info) = true;
  STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt;

  /* Patterns cannot be vectorized using SLP, because they change the order of
     computation.  */
  FOR_EACH_VEC_ELT (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i, next)
    if (next == stmt)
      VEC_ordered_remove (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i);
}
static void
lower_builtin_setjmp (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  tree cont_label = create_artificial_label (loc);
  tree next_label = create_artificial_label (loc);
  tree dest, t, arg;
  gimple g;

  /* NEXT_LABEL is the label __builtin_longjmp will jump to.  Its address is
     passed to both __builtin_setjmp_setup and __builtin_setjmp_receiver.  */
  FORCED_LABEL (next_label) = 1;

  dest = gimple_call_lhs (stmt);

  /* Build '__builtin_setjmp_setup (BUF, NEXT_LABEL)' and insert.  */
  arg = build_addr (next_label, current_function_decl);
  t = builtin_decl_implicit (BUILT_IN_SETJMP_SETUP);
  g = gimple_build_call (t, 2, gimple_call_arg (stmt, 0), arg);
  gimple_set_location (g, loc);
  gimple_set_block (g, gimple_block (stmt));
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'DEST = 0' and insert.  */
  if (dest)
    {
      g = gimple_build_assign (dest, build_zero_cst (TREE_TYPE (dest)));
      gimple_set_location (g, loc);
      gimple_set_block (g, gimple_block (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  /* Build 'goto CONT_LABEL' and insert.  */
  g = gimple_build_goto (cont_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'NEXT_LABEL:' and insert.  */
  g = gimple_build_label (next_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build '__builtin_setjmp_receiver (NEXT_LABEL)' and insert.  */
  arg = build_addr (next_label, current_function_decl);
  t = builtin_decl_implicit (BUILT_IN_SETJMP_RECEIVER);
  g = gimple_build_call (t, 1, arg);
  gimple_set_location (g, loc);
  gimple_set_block (g, gimple_block (stmt));
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Build 'DEST = 1' and insert.  */
  if (dest)
    {
      g = gimple_build_assign (dest, fold_convert_loc (loc, TREE_TYPE (dest),
                                                       integer_one_node));
      gimple_set_location (g, loc);
      gimple_set_block (g, gimple_block (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  /* Build 'CONT_LABEL:' and insert.  */
  g = gimple_build_label (cont_label);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  /* Remove the call to __builtin_setjmp.  */
  gsi_remove (gsi, false);
}
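/* Taken together, the inserts above rewrite 'D = __builtin_setjmp (BUF)'
   into roughly the following shape (a sketch assembled from the builds
   above, not compiler output):

         __builtin_setjmp_setup (BUF, &NEXT_LABEL);
         D = 0;
         goto CONT_LABEL;
     NEXT_LABEL:
         __builtin_setjmp_receiver (&NEXT_LABEL);
         D = 1;
     CONT_LABEL:
         ...

   so a later __builtin_longjmp lands on NEXT_LABEL and the caller
   observes D == 1 on the second return.  */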
static void
instrument_bool_enum_load (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree rhs = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (rhs);
  tree minv = NULL_TREE, maxv = NULL_TREE;

  if (TREE_CODE (type) == BOOLEAN_TYPE && (flag_sanitize & SANITIZE_BOOL))
    {
      minv = boolean_false_node;
      maxv = boolean_true_node;
    }
  else if (TREE_CODE (type) == ENUMERAL_TYPE
           && (flag_sanitize & SANITIZE_ENUM)
           && TREE_TYPE (type) != NULL_TREE
           && TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
           && (TYPE_PRECISION (TREE_TYPE (type))
               < GET_MODE_PRECISION (TYPE_MODE (type))))
    {
      minv = TYPE_MIN_VALUE (TREE_TYPE (type));
      maxv = TYPE_MAX_VALUE (TREE_TYPE (type));
    }
  else
    return;

  int modebitsize = GET_MODE_BITSIZE (TYPE_MODE (type));
  HOST_WIDE_INT bitsize, bitpos;
  tree offset;
  enum machine_mode mode;
  int volatilep = 0, unsignedp = 0;
  tree base = get_inner_reference (rhs, &bitsize, &bitpos, &offset, &mode,
                                   &unsignedp, &volatilep, false);
  tree utype = build_nonstandard_integer_type (modebitsize, 1);

  if ((TREE_CODE (base) == VAR_DECL && DECL_HARD_REGISTER (base))
      || (bitpos % modebitsize) != 0
      || bitsize != modebitsize
      || GET_MODE_BITSIZE (TYPE_MODE (utype)) != modebitsize
      || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return;

  location_t loc = gimple_location (stmt);
  tree ptype = build_pointer_type (TREE_TYPE (rhs));
  tree atype = reference_alias_ptr_type (rhs);
  gimple g = gimple_build_assign (make_ssa_name (ptype, NULL),
                                  build_fold_addr_expr (rhs));
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  tree mem = build2 (MEM_REF, utype, gimple_assign_lhs (g),
                     build_int_cst (atype, 0));
  tree urhs = make_ssa_name (utype, NULL);
  g = gimple_build_assign (urhs, mem);
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  minv = fold_convert (utype, minv);
  maxv = fold_convert (utype, maxv);
  if (!integer_zerop (minv))
    {
      g = gimple_build_assign_with_ops (MINUS_EXPR,
                                        make_ssa_name (utype, NULL),
                                        urhs, minv);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  gimple_stmt_iterator gsi2 = *gsi;
  basic_block then_bb, fallthru_bb;
  *gsi = create_cond_insert_point (gsi, true, false, true,
                                   &then_bb, &fallthru_bb);
  g = gimple_build_cond (GT_EXPR, gimple_assign_lhs (g),
                         int_const_binop (MINUS_EXPR, maxv, minv),
                         NULL_TREE, NULL_TREE);
  gimple_set_location (g, loc);
  gsi_insert_after (gsi, g, GSI_NEW_STMT);

  gimple_assign_set_rhs_with_ops (&gsi2, NOP_EXPR, urhs, NULL_TREE);
  update_stmt (stmt);

  gsi2 = gsi_after_labels (then_bb);
  if (flag_sanitize_undefined_trap_on_error)
    g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  else
    {
      tree data = ubsan_create_data ("__ubsan_invalid_value_data", &loc, NULL,
                                     ubsan_type_descriptor (type), NULL_TREE);
      data = build_fold_addr_expr_loc (loc, data);
      enum built_in_function bcode
        = flag_sanitize_recover
          ? BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE
          : BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE_ABORT;
      tree fn = builtin_decl_explicit (bcode);
      tree val = force_gimple_operand_gsi (&gsi2, ubsan_encode_value (urhs),
                                           true, NULL_TREE, true,
                                           GSI_SAME_STMT);
      g = gimple_build_call (fn, 2, data, val);
    }
  gimple_set_location (g, loc);
  gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
}
static bool
shrink_wrap_one_built_in_call (gimple bi_call)
{
  gimple_stmt_iterator bi_call_bsi;
  basic_block bi_call_bb, join_tgt_bb, guard_bb, guard_bb0;
  edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru;
  edge bi_call_in_edge0, guard_bb_in_edge;
  unsigned tn_cond_stmts, nconds;
  unsigned ci;
  gimple cond_expr = NULL;
  gimple cond_expr_start;
  tree bi_call_label_decl;
  gimple bi_call_label;

  auto_vec<gimple, 12> conds;
  gen_shrink_wrap_conditions (bi_call, conds, &nconds);

  /* This can happen if the condition generator decides it is not
     beneficial to do the transformation.  Just return false and do not
     do any transformation for the call.  */
  if (nconds == 0)
    return false;

  bi_call_bb = gimple_bb (bi_call);

  /* Now find the join target bb -- split bi_call_bb if needed.  */
  if (stmt_ends_bb_p (bi_call))
    {
      /* If the call must be the last in the bb, don't split the block,
         it could e.g. have EH edges.  */
      join_tgt_in_edge_from_call = find_fallthru_edge (bi_call_bb->succs);
      if (join_tgt_in_edge_from_call == NULL)
        return false;
    }
  else
    join_tgt_in_edge_from_call = split_block (bi_call_bb, bi_call);

  bi_call_bsi = gsi_for_stmt (bi_call);

  join_tgt_bb = join_tgt_in_edge_from_call->dest;

  /* Now it is time to insert the first conditional expression
     into bi_call_bb and split this bb so that bi_call is
     shrink-wrapped.  */
  tn_cond_stmts = conds.length ();
  cond_expr = NULL;
  cond_expr_start = conds[0];
  for (ci = 0; ci < tn_cond_stmts; ci++)
    {
      gimple c = conds[ci];
      gcc_assert (c || ci != 0);
      if (!c)
        break;
      gsi_insert_before (&bi_call_bsi, c, GSI_SAME_STMT);
      cond_expr = c;
    }
  nconds--;
  ci++;
  gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);

  /* Now the label.  */
  bi_call_label_decl = create_artificial_label (gimple_location (bi_call));
  bi_call_label = gimple_build_label (bi_call_label_decl);
  gsi_insert_before (&bi_call_bsi, bi_call_label, GSI_SAME_STMT);

  bi_call_in_edge0 = split_block (bi_call_bb, cond_expr);
  bi_call_in_edge0->flags &= ~EDGE_FALLTHRU;
  bi_call_in_edge0->flags |= EDGE_TRUE_VALUE;
  guard_bb0 = bi_call_bb;
  bi_call_bb = bi_call_in_edge0->dest;
  join_tgt_in_edge_fall_thru = make_edge (guard_bb0, join_tgt_bb,
                                          EDGE_FALSE_VALUE);

  bi_call_in_edge0->probability = REG_BR_PROB_BASE * ERR_PROB;
  bi_call_in_edge0->count = apply_probability (guard_bb0->count,
                                               bi_call_in_edge0->probability);
  join_tgt_in_edge_fall_thru->probability
    = inverse_probability (bi_call_in_edge0->probability);
  join_tgt_in_edge_fall_thru->count
    = guard_bb0->count - bi_call_in_edge0->count;

  /* Code generation for the rest of the conditions  */
  guard_bb = guard_bb0;
  while (nconds > 0)
    {
      unsigned ci0;
      edge bi_call_in_edge;
      gimple_stmt_iterator guard_bsi = gsi_for_stmt (cond_expr_start);
      ci0 = ci;
      cond_expr_start = conds[ci0];
      for (; ci < tn_cond_stmts; ci++)
        {
          gimple c = conds[ci];
          gcc_assert (c || ci != ci0);
          if (!c)
            break;
          gsi_insert_before (&guard_bsi, c, GSI_SAME_STMT);
          cond_expr = c;
        }
      nconds--;
      ci++;
      gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND);
      guard_bb_in_edge = split_block (guard_bb, cond_expr);
      guard_bb_in_edge->flags &= ~EDGE_FALLTHRU;
      guard_bb_in_edge->flags |= EDGE_FALSE_VALUE;

      bi_call_in_edge = make_edge (guard_bb, bi_call_bb, EDGE_TRUE_VALUE);

      bi_call_in_edge->probability = REG_BR_PROB_BASE * ERR_PROB;
      bi_call_in_edge->count = apply_probability (guard_bb->count,
                                                  bi_call_in_edge->probability);
      guard_bb_in_edge->probability
        = inverse_probability (bi_call_in_edge->probability);
      guard_bb_in_edge->count = guard_bb->count - bi_call_in_edge->count;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      location_t loc;
      loc = gimple_location (bi_call);
      fprintf (dump_file,
               "%s:%d: note: function call is shrink-wrapped"
               " into error conditions.\n",
               LOCATION_FILE (loc), LOCATION_LINE (loc));
    }

  return true;
}
/* Synthesize a CALL_EXPR and a TRY_FINALLY_EXPR, for this chain of
   _DECLs if appropriate.  Arrange to call the __mf_register function
   now, and the __mf_unregister function later for each.  Return the
   gimple sequence after synthesis.  */

gimple_seq
mx_register_decls (tree decl, gimple_seq seq, location_t location)
{
  gimple_seq finally_stmts = NULL;
  gimple_stmt_iterator initially_stmts = gsi_start (seq);

  while (decl != NULL_TREE)
    {
      if (mf_decl_eligible_p (decl)
          /* Not already processed.  */
          && ! mf_marked_p (decl)
          /* Automatic variable.  */
          && ! DECL_EXTERNAL (decl)
          && ! TREE_STATIC (decl))
        {
          tree size = NULL_TREE, variable_name;
          gimple unregister_fncall, register_fncall;
          tree unregister_fncall_param, register_fncall_param;

          /* Variable-sized objects should have sizes already been
             gimplified when we got here.  */
          size = fold_convert (size_type_node,
                               TYPE_SIZE_UNIT (TREE_TYPE (decl)));
          gcc_assert (is_gimple_val (size));

          unregister_fncall_param
            = mf_mark (build1 (ADDR_EXPR,
                               build_pointer_type (TREE_TYPE (decl)),
                               decl));
          /* __mf_unregister (&VARIABLE, sizeof (VARIABLE), __MF_TYPE_STACK) */
          unregister_fncall = gimple_build_call (mf_unregister_fndecl, 3,
                                                 unregister_fncall_param,
                                                 size,
                                                 integer_three_node);

          variable_name = mf_varname_tree (decl);
          register_fncall_param
            = mf_mark (build1 (ADDR_EXPR,
                               build_pointer_type (TREE_TYPE (decl)),
                               decl));
          /* __mf_register (&VARIABLE, sizeof (VARIABLE), __MF_TYPE_STACK,
             "name") */
          register_fncall = gimple_build_call (mf_register_fndecl, 4,
                                               register_fncall_param,
                                               size,
                                               integer_three_node,
                                               variable_name);

          /* Accumulate the two calls.  */
          gimple_set_location (register_fncall, location);
          gimple_set_location (unregister_fncall, location);

          /* Add the __mf_register call at the current appending point.  */
          if (gsi_end_p (initially_stmts))
            {
              if (!mf_artificial (decl))
                warning (OPT_Wmudflap,
                         "mudflap cannot track %qE in stub function",
                         DECL_NAME (decl));
            }
          else
            {
              gsi_insert_before (&initially_stmts, register_fncall,
                                 GSI_SAME_STMT);

              /* Accumulate the FINALLY piece.  */
              gimple_seq_add_stmt (&finally_stmts, unregister_fncall);
            }
          mf_mark (decl);
        }

      decl = DECL_CHAIN (decl);
    }

  /* Actually, (initially_stmts!=NULL) <=> (finally_stmts!=NULL) */
  if (finally_stmts != NULL)
    {
      gimple stmt = gimple_build_try (seq, finally_stmts,
                                      GIMPLE_TRY_FINALLY);
      gimple_seq new_seq = NULL;
      gimple_seq_add_stmt (&new_seq, stmt);
      return new_seq;
    }
  else
    return seq;
}
static void
instrument_si_overflow (gimple_stmt_iterator gsi)
{
  gimple stmt = gsi_stmt (gsi);
  tree_code code = gimple_assign_rhs_code (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree lhstype = TREE_TYPE (lhs);
  tree a, b;
  gimple g;

  /* If this is not a signed operation, don't instrument anything here.
     Also punt on bit-fields.  */
  if (!INTEGRAL_TYPE_P (lhstype)
      || TYPE_OVERFLOW_WRAPS (lhstype)
      || GET_MODE_BITSIZE (TYPE_MODE (lhstype)) != TYPE_PRECISION (lhstype))
    return;

  switch (code)
    {
    case MINUS_EXPR:
    case PLUS_EXPR:
    case MULT_EXPR:
      /* Transform
         i = u {+,-,*} 5;
         into
         i = UBSAN_CHECK_{ADD,SUB,MUL} (u, 5);  */
      a = gimple_assign_rhs1 (stmt);
      b = gimple_assign_rhs2 (stmt);
      g = gimple_build_call_internal (code == PLUS_EXPR
                                      ? IFN_UBSAN_CHECK_ADD
                                      : code == MINUS_EXPR
                                      ? IFN_UBSAN_CHECK_SUB
                                      : IFN_UBSAN_CHECK_MUL, 2, a, b);
      gimple_call_set_lhs (g, lhs);
      gsi_replace (&gsi, g, false);
      break;
    case NEGATE_EXPR:
      /* Represent i = -u;
         as
         i = UBSAN_CHECK_SUB (0, u);  */
      a = build_int_cst (lhstype, 0);
      b = gimple_assign_rhs1 (stmt);
      g = gimple_build_call_internal (IFN_UBSAN_CHECK_SUB, 2, a, b);
      gimple_call_set_lhs (g, lhs);
      gsi_replace (&gsi, g, false);
      break;
    case ABS_EXPR:
      /* Transform i = ABS_EXPR<u>;
         into
         _N = UBSAN_CHECK_SUB (0, u);
         i = ABS_EXPR<_N>;  */
      a = build_int_cst (lhstype, 0);
      b = gimple_assign_rhs1 (stmt);
      g = gimple_build_call_internal (IFN_UBSAN_CHECK_SUB, 2, a, b);
      a = make_ssa_name (lhstype, NULL);
      gimple_call_set_lhs (g, a);
      gimple_set_location (g, gimple_location (stmt));
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      gimple_assign_set_rhs1 (stmt, a);
      update_stmt (stmt);
      break;
    default:
      break;
    }
}
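/* A minimal sketch, not part of the sources above, of how a per-statement
   instrumentation helper such as instrument_si_overflow is typically
   driven: walk every statement of every basic block and hand the iterator
   to the helper.  The pass wiring (gate function, opt_pass subclass) is
   omitted; only the statement walk is shown.  */

static unsigned int
instrument_all_assigns_sketch (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      /* The helper reads the RHS code immediately, so only hand it
         assignments.  */
      if (is_gimple_assign (gsi_stmt (gsi)))
        instrument_si_overflow (gsi);

  return 0;
}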