void cgraph_rebuild_references (void) { basic_block bb; struct cgraph_node *node = cgraph_get_node (current_function_decl); gimple_stmt_iterator gsi; ipa_remove_all_references (&node->ref_list); node->count = ENTRY_BLOCK_PTR->count; FOR_EACH_BB (bb) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); walk_stmt_load_store_addr_ops (stmt, node, mark_load, mark_store, mark_address); } for (gsi = gsi_start (phi_nodes (bb)); !gsi_end_p (gsi); gsi_next (&gsi)) walk_stmt_load_store_addr_ops (gsi_stmt (gsi), node, mark_load, mark_store, mark_address); } record_eh_tables (node, cfun); }
static void lower_try_catch (gimple_stmt_iterator *gsi, struct lower_data *data) { bool cannot_fallthru; gimple stmt = gsi_stmt (*gsi); gimple_stmt_iterator i; /* We don't handle GIMPLE_TRY_FINALLY. */ gcc_assert (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH); lower_sequence (gimple_try_eval_ptr (stmt), data); cannot_fallthru = data->cannot_fallthru; i = gsi_start (*gimple_try_cleanup_ptr (stmt)); switch (gimple_code (gsi_stmt (i))) { case GIMPLE_CATCH: /* We expect to see a sequence of GIMPLE_CATCH stmts, each with a catch expression and a body. The whole try/catch may fall through iff any of the catch bodies falls through. */ for (; !gsi_end_p (i); gsi_next (&i)) { data->cannot_fallthru = false; lower_sequence (gimple_catch_handler_ptr ( as_a <gcatch *> (gsi_stmt (i))), data); if (!data->cannot_fallthru) cannot_fallthru = false; } break; case GIMPLE_EH_FILTER: /* The exception filter expression only matters if there is an exception. If the exception does not match EH_FILTER_TYPES, we will execute EH_FILTER_FAILURE, and we will fall through if that falls through. If the exception does match EH_FILTER_TYPES, the stack unwinder will continue up the stack, so we will not fall through. We don't know whether we will throw an exception which matches EH_FILTER_TYPES or not, so we just ignore EH_FILTER_TYPES and assume that we might throw an exception which doesn't match. */ data->cannot_fallthru = false; lower_sequence (gimple_eh_filter_failure_ptr (gsi_stmt (i)), data); if (!data->cannot_fallthru) cannot_fallthru = false; break; default: /* This case represents statements to be executed when an exception occurs. Those statements are implicitly followed by a GIMPLE_RESX to resume execution after the exception. So in this case the try/catch never falls through. */ data->cannot_fallthru = false; lower_sequence (gimple_try_cleanup_ptr (stmt), data); break; } data->cannot_fallthru = cannot_fallthru; gsi_next (gsi); }
gimple * walk_gimple_seq_mod (gimple_seq *pseq, walk_stmt_fn callback_stmt, walk_tree_fn callback_op, struct walk_stmt_info *wi) { gimple_stmt_iterator gsi; for (gsi = gsi_start (*pseq); !gsi_end_p (gsi); ) { tree ret = walk_gimple_stmt (&gsi, callback_stmt, callback_op, wi); if (ret) { /* If CALLBACK_STMT or CALLBACK_OP return a value, WI must exist to hold it. */ gcc_assert (wi); wi->callback_result = ret; return wi->removed_stmt ? NULL : gsi_stmt (gsi); } if (!wi->removed_stmt) gsi_next (&gsi); } if (wi) wi->callback_result = NULL_TREE; return NULL; }
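/* A minimal usage sketch (added for illustration, not part of the original):
   driving walk_gimple_seq_mod with a statement callback that counts the call
   statements in a sequence.  The callback name and the counter handed over
   through wi->info are hypothetical; the walker and iterator calls follow the
   API used in the function above.  */

static tree
count_calls_cb (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                struct walk_stmt_info *wi)
{
  if (is_gimple_call (gsi_stmt (*gsi)))
    ++*(unsigned *) wi->info;
  *handled_ops_p = false;   /* Let the walker visit the operands as usual.  */
  return NULL_TREE;         /* Returning non-NULL would stop the walk.  */
}

static unsigned
count_calls_in_seq (gimple_seq *pseq)
{
  struct walk_stmt_info wi;
  unsigned ncalls = 0;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ncalls;
  walk_gimple_seq_mod (pseq, count_calls_cb, NULL, &wi);
  return ncalls;
}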
static void prop_phis (basic_block b) { gimple_stmt_iterator psi; gimple_seq phis = phi_nodes (b); for (psi = gsi_start (phis); !gsi_end_p (psi); ) { gimple phi = gsi_stmt (psi); tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0); gcc_assert (gimple_phi_num_args (phi) == 1); if (!is_gimple_reg (def)) { imm_use_iterator iter; use_operand_p use_p; gimple stmt; FOR_EACH_IMM_USE_STMT (stmt, iter, def) FOR_EACH_IMM_USE_ON_STMT (use_p, iter) SET_USE (use_p, use); } else replace_uses_by (def, use); remove_phi_node (&psi, true); } }
static void
lower_sequence (gimple_seq *seq, struct lower_data *data)
{
  gimple_stmt_iterator gsi;

  /* lower_stmt advances the iterator itself, so the loop has no gsi_next.  */
  for (gsi = gsi_start (*seq); !gsi_end_p (gsi); )
    lower_stmt (&gsi, data);
}
void
update_modified_stmts (gimple_seq seq)
{
  gimple_stmt_iterator gsi;

  if (!ssa_operands_active (cfun))
    return;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    update_stmt_if_modified (gsi_stmt (gsi));
}
void
set_phi_nodes (basic_block bb, gimple_seq seq)
{
  gimple_stmt_iterator i;

  gcc_checking_assert (!(bb->flags & BB_RTL));
  bb->il.gimple.phi_nodes = seq;
  if (seq)
    for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
      gimple_set_bb (gsi_stmt (i), bb);
}
static gphi *
single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
{
  gimple_stmt_iterator i;
  gphi *phi = NULL;

  if (gimple_seq_singleton_p (seq))
    return as_a <gphi *> (gsi_stmt (gsi_start (seq)));

  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *p = as_a <gphi *> (gsi_stmt (i));

      /* If the PHI arguments are equal then we can skip this PHI.  */
      if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
                                       gimple_phi_arg_def (p, e1->dest_idx)))
        continue;

      /* If we already found a PHI whose arguments for the two edges differ,
         there is no single such PHI, so return NULL.  */
      if (phi)
        return NULL;

      phi = p;
    }

  return phi;
}
static bool gimple_try_catch_may_fallthru (gtry *stmt) { gimple_stmt_iterator i; /* We don't handle GIMPLE_TRY_FINALLY. */ gcc_assert (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH); /* If the TRY block can fall through, the whole TRY_CATCH can fall through. */ if (gimple_seq_may_fallthru (gimple_try_eval (stmt))) return true; i = gsi_start (*gimple_try_cleanup_ptr (stmt)); switch (gimple_code (gsi_stmt (i))) { case GIMPLE_CATCH: /* We expect to see a sequence of GIMPLE_CATCH stmts, each with a catch expression and a body. The whole try/catch may fall through iff any of the catch bodies falls through. */ for (; !gsi_end_p (i); gsi_next (&i)) { if (gimple_seq_may_fallthru (gimple_catch_handler ( as_a <gcatch *> (gsi_stmt (i))))) return true; } return false; case GIMPLE_EH_FILTER: /* The exception filter expression only matters if there is an exception. If the exception does not match EH_FILTER_TYPES, we will execute EH_FILTER_FAILURE, and we will fall through if that falls through. If the exception does match EH_FILTER_TYPES, the stack unwinder will continue up the stack, so we will not fall through. We don't know whether we will throw an exception which matches EH_FILTER_TYPES or not, so we just ignore EH_FILTER_TYPES and assume that we might throw an exception which doesn't match. */ return gimple_seq_may_fallthru (gimple_eh_filter_failure (gsi_stmt (i))); default: /* This case represents statements to be executed when an exception occurs. Those statements are implicitly followed by a GIMPLE_RESX to resume execution after the exception. So in this case the try/catch never falls through. */ return false; } }
unsigned int rebuild_cgraph_edges (void) { basic_block bb; struct cgraph_node *node = cgraph_get_node (current_function_decl); gimple_stmt_iterator gsi; cgraph_node_remove_callees (node); ipa_remove_all_references (&node->ref_list); node->count = ENTRY_BLOCK_PTR->count; FOR_EACH_BB (bb) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); tree decl; if (is_gimple_call (stmt)) { int freq = compute_call_stmt_bb_frequency (current_function_decl, bb); decl = gimple_call_fndecl (stmt); if (decl) cgraph_create_edge (node, cgraph_get_create_node (decl), stmt, bb->count, freq); else cgraph_create_indirect_edge (node, stmt, gimple_call_flags (stmt), bb->count, freq); } walk_stmt_load_store_addr_ops (stmt, node, mark_load, mark_store, mark_address); } for (gsi = gsi_start (phi_nodes (bb)); !gsi_end_p (gsi); gsi_next (&gsi)) walk_stmt_load_store_addr_ops (gsi_stmt (gsi), node, mark_load, mark_store, mark_address); } record_eh_tables (node, cfun); gcc_assert (!node->global.inlined_to); return 0; }
/* The core routine of conditional store replacement and normal phi optimizations. Both share much of the infrastructure in how to match applicable basic block patterns. DO_STORE_ELIM is true when we want to do conditional store replacement, false otherwise. DO_HOIST_LOADS is true when we want to hoist adjacent loads out of diamond control flow patterns, false otherwise. */ static unsigned int tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads) { basic_block bb; basic_block *bb_order; unsigned n, i; bool cfgchanged = false; hash_set<tree> *nontrap = 0; if (do_store_elim) /* Calculate the set of non-trapping memory accesses. */ nontrap = get_non_trapping (); /* Search every basic block for COND_EXPR we may be able to optimize. We walk the blocks in order that guarantees that a block with a single predecessor is processed before the predecessor. This ensures that we collapse inner ifs before visiting the outer ones, and also that we do not try to visit a removed block. */ bb_order = single_pred_before_succ_order (); n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; for (i = 0; i < n; i++) { gimple cond_stmt; gphi *phi; basic_block bb1, bb2; edge e1, e2; tree arg0, arg1; bb = bb_order[i]; cond_stmt = last_stmt (bb); /* Check to see if the last statement is a GIMPLE_COND. */ if (!cond_stmt || gimple_code (cond_stmt) != GIMPLE_COND) continue; e1 = EDGE_SUCC (bb, 0); bb1 = e1->dest; e2 = EDGE_SUCC (bb, 1); bb2 = e2->dest; /* We cannot do the optimization on abnormal edges. */ if ((e1->flags & EDGE_ABNORMAL) != 0 || (e2->flags & EDGE_ABNORMAL) != 0) continue; /* If either bb1's succ or bb2 or bb2's succ is non NULL. */ if (EDGE_COUNT (bb1->succs) == 0 || bb2 == NULL || EDGE_COUNT (bb2->succs) == 0) continue; /* Find the bb which is the fall through to the other. */ if (EDGE_SUCC (bb1, 0)->dest == bb2) ; else if (EDGE_SUCC (bb2, 0)->dest == bb1) { std::swap (bb1, bb2); std::swap (e1, e2); } else if (do_store_elim && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest) { basic_block bb3 = EDGE_SUCC (bb1, 0)->dest; if (!single_succ_p (bb1) || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0 || !single_succ_p (bb2) || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0 || EDGE_COUNT (bb3->preds) != 2) continue; if (cond_if_else_store_replacement (bb1, bb2, bb3)) cfgchanged = true; continue; } else if (do_hoist_loads && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest) { basic_block bb3 = EDGE_SUCC (bb1, 0)->dest; if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt))) && single_succ_p (bb1) && single_succ_p (bb2) && single_pred_p (bb1) && single_pred_p (bb2) && EDGE_COUNT (bb->succs) == 2 && EDGE_COUNT (bb3->preds) == 2 /* If one edge or the other is dominant, a conditional move is likely to perform worse than the well-predicted branch. */ && !predictable_edge_p (EDGE_SUCC (bb, 0)) && !predictable_edge_p (EDGE_SUCC (bb, 1))) hoist_adjacent_loads (bb, bb1, bb2, bb3); continue; } else continue; e1 = EDGE_SUCC (bb1, 0); /* Make sure that bb1 is just a fall through. */ if (!single_succ_p (bb1) || (e1->flags & EDGE_FALLTHRU) == 0) continue; /* Also make sure that bb1 only have one predecessor and that it is bb. */ if (!single_pred_p (bb1) || single_pred (bb1) != bb) continue; if (do_store_elim) { /* bb1 is the middle block, bb2 the join block, bb the split block, e1 the fallthrough edge from bb1 to bb2. We can't do the optimization if the join block has more than two predecessors. 
*/ if (EDGE_COUNT (bb2->preds) > 2) continue; if (cond_store_replacement (bb1, bb2, e1, e2, nontrap)) cfgchanged = true; } else { gimple_seq phis = phi_nodes (bb2); gimple_stmt_iterator gsi; bool candorest = true; /* Value replacement can work with more than one PHI so try that first. */ for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi)) { phi = as_a <gphi *> (gsi_stmt (gsi)); arg0 = gimple_phi_arg_def (phi, e1->dest_idx); arg1 = gimple_phi_arg_def (phi, e2->dest_idx); if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2) { candorest = false; cfgchanged = true; break; } } if (!candorest) continue; phi = single_non_singleton_phi_for_edges (phis, e1, e2); if (!phi) continue; arg0 = gimple_phi_arg_def (phi, e1->dest_idx); arg1 = gimple_phi_arg_def (phi, e2->dest_idx); /* Something is wrong if we cannot find the arguments in the PHI node. */ gcc_assert (arg0 != NULL && arg1 != NULL); if (factor_out_conditional_conversion (e1, e2, phi, arg0, arg1)) { /* factor_out_conditional_conversion may create a new PHI in BB2 and eliminate an existing PHI in BB2. Recompute values that may be affected by that change. */ phis = phi_nodes (bb2); phi = single_non_singleton_phi_for_edges (phis, e1, e2); gcc_assert (phi); arg0 = gimple_phi_arg_def (phi, e1->dest_idx); arg1 = gimple_phi_arg_def (phi, e2->dest_idx); gcc_assert (arg0 != NULL && arg1 != NULL); } /* Do the replacement of conditional if it can be done. */ if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; } } free (bb_order); if (do_store_elim) delete nontrap; /* If the CFG has changed, we should cleanup the CFG. */ if (cfgchanged && do_store_elim) { /* In cond-store replacement we have added some loads on edges and new VOPS (as we moved the store, and created a load). */ gsi_commit_edge_inserts (); return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals; } else if (cfgchanged) return TODO_cleanup_cfg; return 0; }
static unsigned int
build_cgraph_edges (void)
{
  basic_block bb;
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  struct pointer_set_t *visited_nodes = pointer_set_create ();
  gimple_stmt_iterator gsi;
  tree decl;
  unsigned ix;

  /* Create the callgraph edges and record the nodes referenced by the
     function body.  */
  FOR_EACH_BB (bb)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          tree decl;

          if (is_gimple_call (stmt))
            {
              int freq = compute_call_stmt_bb_frequency (current_function_decl,
                                                         bb);
              decl = gimple_call_fndecl (stmt);
              if (decl)
                cgraph_create_edge (node, cgraph_get_create_node (decl),
                                    stmt, bb->count, freq);
              else
                cgraph_create_indirect_edge (node, stmt,
                                             gimple_call_flags (stmt),
                                             bb->count, freq);
            }
          walk_stmt_load_store_addr_ops (stmt, node, mark_load, mark_store,
                                         mark_address);
          if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
              && gimple_omp_parallel_child_fn (stmt))
            {
              tree fn = gimple_omp_parallel_child_fn (stmt);
              ipa_record_reference (node, NULL, cgraph_get_create_node (fn),
                                    NULL, IPA_REF_ADDR, stmt);
            }
          if (gimple_code (stmt) == GIMPLE_OMP_TASK)
            {
              tree fn = gimple_omp_task_child_fn (stmt);
              if (fn)
                ipa_record_reference (node, NULL, cgraph_get_create_node (fn),
                                      NULL, IPA_REF_ADDR, stmt);
              fn = gimple_omp_task_copy_fn (stmt);
              if (fn)
                ipa_record_reference (node, NULL, cgraph_get_create_node (fn),
                                      NULL, IPA_REF_ADDR, stmt);
            }
        }
      for (gsi = gsi_start (phi_nodes (bb)); !gsi_end_p (gsi); gsi_next (&gsi))
        walk_stmt_load_store_addr_ops (gsi_stmt (gsi), node,
                                       mark_load, mark_store, mark_address);
    }

  /* Look for initializers of constant variables and private statics.  */
  FOR_EACH_LOCAL_DECL (cfun, ix, decl)
    if (TREE_CODE (decl) == VAR_DECL
        && (TREE_STATIC (decl) && !DECL_EXTERNAL (decl)))
      varpool_finalize_decl (decl);

  record_eh_tables (node, cfun);
  pointer_set_destroy (visited_nodes);

  return 0;
}
/* The core routine of conditional store replacement and normal phi optimizations. Both share much of the infrastructure in how to match applicable basic block patterns. DO_STORE_ELIM is true when we want to do conditional store replacement, false otherwise. */ static unsigned int tree_ssa_phiopt_worker (bool do_store_elim) { basic_block bb; basic_block *bb_order; unsigned n, i; bool cfgchanged = false; struct pointer_set_t *nontrap = 0; if (do_store_elim) { condstoretemp = NULL_TREE; /* Calculate the set of non-trapping memory accesses. */ nontrap = get_non_trapping (); } /* Search every basic block for COND_EXPR we may be able to optimize. We walk the blocks in order that guarantees that a block with a single predecessor is processed before the predecessor. This ensures that we collapse inner ifs before visiting the outer ones, and also that we do not try to visit a removed block. */ bb_order = blocks_in_phiopt_order (); n = n_basic_blocks - NUM_FIXED_BLOCKS; for (i = 0; i < n; i++) { gimple cond_stmt, phi; basic_block bb1, bb2; edge e1, e2; tree arg0, arg1; bb = bb_order[i]; cond_stmt = last_stmt (bb); /* Check to see if the last statement is a GIMPLE_COND. */ if (!cond_stmt || gimple_code (cond_stmt) != GIMPLE_COND) continue; e1 = EDGE_SUCC (bb, 0); bb1 = e1->dest; e2 = EDGE_SUCC (bb, 1); bb2 = e2->dest; /* We cannot do the optimization on abnormal edges. */ if ((e1->flags & EDGE_ABNORMAL) != 0 || (e2->flags & EDGE_ABNORMAL) != 0) continue; /* If either bb1's succ or bb2 or bb2's succ is non NULL. */ if (EDGE_COUNT (bb1->succs) == 0 || bb2 == NULL || EDGE_COUNT (bb2->succs) == 0) continue; /* Find the bb which is the fall through to the other. */ if (EDGE_SUCC (bb1, 0)->dest == bb2) ; else if (EDGE_SUCC (bb2, 0)->dest == bb1) { basic_block bb_tmp = bb1; edge e_tmp = e1; bb1 = bb2; bb2 = bb_tmp; e1 = e2; e2 = e_tmp; } else continue; e1 = EDGE_SUCC (bb1, 0); /* Make sure that bb1 is just a fall through. */ if (!single_succ_p (bb1) || (e1->flags & EDGE_FALLTHRU) == 0) continue; /* Also make sure that bb1 only have one predecessor and that it is bb. */ if (!single_pred_p (bb1) || single_pred (bb1) != bb) continue; if (do_store_elim) { /* bb1 is the middle block, bb2 the join block, bb the split block, e1 the fallthrough edge from bb1 to bb2. We can't do the optimization if the join block has more than two predecessors. */ if (EDGE_COUNT (bb2->preds) > 2) continue; if (cond_store_replacement (bb1, bb2, e1, e2, nontrap)) cfgchanged = true; } else { gimple_seq phis = phi_nodes (bb2); /* Check to make sure that there is only one PHI node. TODO: we could do it with more than one iff the other PHI nodes have the same elements for these two edges. */ if (! gimple_seq_singleton_p (phis)) continue; phi = gsi_stmt (gsi_start (phis)); arg0 = gimple_phi_arg_def (phi, e1->dest_idx); arg1 = gimple_phi_arg_def (phi, e2->dest_idx); /* Something is wrong if we cannot find the arguments in the PHI node. */ gcc_assert (arg0 != NULL && arg1 != NULL); /* Do the replacement of conditional if it can be done. */ if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; } } free (bb_order); if (do_store_elim) pointer_set_destroy (nontrap); /* If the CFG has changed, we should cleanup the CFG. 
*/ if (cfgchanged && do_store_elim) { /* In cond-store replacement we have added some loads on edges and new VOPS (as we moved the store, and created a load). */ gsi_commit_edge_inserts (); return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals; } else if (cfgchanged) return TODO_cleanup_cfg; return 0; }
static unsigned int lower_function_body (void) { struct lower_data data; gimple_seq body = gimple_body (current_function_decl); gimple_seq lowered_body; gimple_stmt_iterator i; gimple bind; tree t; gimple x; /* The gimplifier should've left a body of exactly one statement, namely a GIMPLE_BIND. */ gcc_assert (gimple_seq_first (body) == gimple_seq_last (body) && gimple_code (gimple_seq_first_stmt (body)) == GIMPLE_BIND); memset (&data, 0, sizeof (data)); data.block = DECL_INITIAL (current_function_decl); BLOCK_SUBBLOCKS (data.block) = NULL_TREE; BLOCK_CHAIN (data.block) = NULL_TREE; TREE_ASM_WRITTEN (data.block) = 1; data.return_statements.create (8); bind = gimple_seq_first_stmt (body); lowered_body = NULL; gimple_seq_add_stmt (&lowered_body, bind); i = gsi_start (lowered_body); lower_gimple_bind (&i, &data); i = gsi_last (lowered_body); /* If the function falls off the end, we need a null return statement. If we've already got one in the return_statements vector, we don't need to do anything special. Otherwise build one by hand. */ if (gimple_seq_may_fallthru (lowered_body) && (data.return_statements.is_empty () || gimple_return_retval (data.return_statements.last().stmt) != NULL)) { x = gimple_build_return (NULL); gimple_set_location (x, cfun->function_end_locus); gimple_set_block (x, DECL_INITIAL (current_function_decl)); gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); } /* If we lowered any return statements, emit the representative at the end of the function. */ while (!data.return_statements.is_empty ()) { return_statements_t t = data.return_statements.pop (); x = gimple_build_label (t.label); gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); gsi_insert_after (&i, t.stmt, GSI_CONTINUE_LINKING); } /* If the function calls __builtin_setjmp, we need to emit the computed goto that will serve as the unique dispatcher for all the receivers. */ if (data.calls_builtin_setjmp) { tree disp_label, disp_var, arg; /* Build 'DISP_LABEL:' and insert. */ disp_label = create_artificial_label (cfun->function_end_locus); /* This mark will create forward edges from every call site. */ DECL_NONLOCAL (disp_label) = 1; cfun->has_nonlocal_label = 1; x = gimple_build_label (disp_label); gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); /* Build 'DISP_VAR = __builtin_setjmp_dispatcher (DISP_LABEL);' and insert. */ disp_var = create_tmp_var (ptr_type_node, "setjmpvar"); arg = build_addr (disp_label, current_function_decl); t = builtin_decl_implicit (BUILT_IN_SETJMP_DISPATCHER); x = gimple_build_call (t, 1, arg); gimple_call_set_lhs (x, disp_var); /* Build 'goto DISP_VAR;' and insert. */ gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); x = gimple_build_goto (disp_var); gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); } /* Once the old body has been lowered, replace it with the new lowered sequence. */ gimple_set_body (current_function_decl, lowered_body); gcc_assert (data.block == DECL_INITIAL (current_function_decl)); BLOCK_SUBBLOCKS (data.block) = blocks_nreverse (BLOCK_SUBBLOCKS (data.block)); clear_block_marks (data.block); data.return_statements.release (); return 0; }
/* Synthesize a CALL_EXPR and a TRY_FINALLY_EXPR for this chain of _DECLs if
   appropriate.  Arrange to call the red-zone initialization functions now,
   and the matching uninitialization functions later, for each.  Return the
   gimple sequence after synthesis.  */

gimple_seq
mx_register_decls (tree decl, gimple_seq seq, gimple stmt,
                   location_t location, bool func_args)
{
  gimple_seq finally_stmts = NULL;
  gimple_stmt_iterator initially_stmts = gsi_start (seq);
  bool sframe_inserted = false;
  size_t front_rz_size, rear_rz_size;
  tree fsize, rsize, size;
  gimple uninit_fncall_front, uninit_fncall_rear, init_fncall_front,
         init_fncall_rear, init_assign_stmt;
  tree fncall_param_front, fncall_param_rear;
  int map_ret;

  while (decl != NULL_TREE)
    {
      if ((mf_decl_eligible_p (decl)
           || TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
          /* Not already processed.  */
          && ! mf_marked_p (decl)
          /* Automatic variable.  */
          && ! DECL_EXTERNAL (decl)
          && ! TREE_STATIC (decl)
          && get_name (decl))
        {
          DEBUGLOG ("DEBUG Instrumenting %s is_complete_type %d\n",
                    IDENTIFIER_POINTER (DECL_NAME (decl)),
                    COMPLETE_TYPE_P (decl));

          /* Construct a tree corresponding to the type
               struct {
                 unsigned int rz_front[6U];
                 <original variable>
                 unsigned int rz_rear[6U];
               };  */

          if (!sframe_inserted)
            {
              gimple ensure_fn_call
                = gimple_build_call (lbc_ensure_sframe_bitmap_fndecl, 0);
              gimple_set_location (ensure_fn_call, location);
              gsi_insert_before (&initially_stmts, ensure_fn_call,
                                 GSI_SAME_STMT);
              sframe_inserted = true;
            }

          // Calculate the zone sizes.
          size_t element_size = 0, request_size = 0;
          if (COMPLETE_TYPE_P (decl))
            {
              request_size
                = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (decl)));
              if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
                element_size
                  = TREE_INT_CST_LOW (TYPE_SIZE_UNIT
                                      (TREE_TYPE (TREE_TYPE (decl))));
              else
                element_size = request_size;
            }
          calculate_zone_sizes (element_size, request_size, /*global*/ false,
                                COMPLETE_TYPE_P (decl),
                                &front_rz_size, &rear_rz_size);
          DEBUGLOG ("DEBUG *SIZES* req_size %zu, ele_size %zu, fsize %zu, rsize %zu\n",
                    request_size, element_size, front_rz_size, rear_rz_size);

          tree struct_type = create_struct_type (decl, front_rz_size,
                                                 rear_rz_size);
          tree struct_var = create_struct_var (struct_type, decl, location);
          declare_vars (struct_var, stmt, 0);

          /* Insert into the hashtable.  */
          PWord_t PV;
          JSLI (PV, decl_map, mf_varname_tree (decl));
          gcc_assert (PV);
          *PV = (PWord_t) struct_var;

          fsize = convert (unsigned_type_node, size_int (front_rz_size));
          gcc_assert (is_gimple_val (fsize));

          tree rz_front = TYPE_FIELDS (struct_type);
          fncall_param_front
            = mf_mark (build1 (ADDR_EXPR, ptr_type_node,
                               build3 (COMPONENT_REF, TREE_TYPE (rz_front),
                                       struct_var, rz_front, NULL_TREE)));
          uninit_fncall_front
            = gimple_build_call (lbc_uninit_front_rz_fndecl, 2,
                                 fncall_param_front, fsize);
          init_fncall_front
            = gimple_build_call (lbc_init_front_rz_fndecl, 2,
                                 fncall_param_front, fsize);
          gimple_set_location (init_fncall_front, location);
          gimple_set_location (uninit_fncall_front, location);

          // Incomplete types have only a front red zone.
          if (COMPLETE_TYPE_P (decl))
            {
              rsize = convert (unsigned_type_node, size_int (rear_rz_size));
              gcc_assert (is_gimple_val (rsize));

              tree rz_rear
                = DECL_CHAIN (DECL_CHAIN (TYPE_FIELDS (struct_type)));
              fncall_param_rear
                = mf_mark (build1 (ADDR_EXPR, ptr_type_node,
                                   build3 (COMPONENT_REF, TREE_TYPE (rz_rear),
                                           struct_var, rz_rear, NULL_TREE)));
              init_fncall_rear
                = gimple_build_call (lbc_init_rear_rz_fndecl, 2,
                                     fncall_param_rear, rsize);
              uninit_fncall_rear
                = gimple_build_call (lbc_uninit_rear_rz_fndecl, 2,
                                     fncall_param_rear, rsize);
              gimple_set_location (init_fncall_rear, location);
              gimple_set_location (uninit_fncall_rear, location);
            }

          // TODO Do I need this?
#if 0
          if (DECL_INITIAL (decl) != NULL_TREE)
            {
              // This code never seems to be executed for something like
              // "int i = 10;".  Looking at the tree dump, by the time we get
              // here such declarations have been split into two statements,
              // "int i;" and "i = 10;".  Leaving it in just in case.
              tree orig_var_type = DECL_CHAIN (TYPE_FIELDS (struct_type));
              tree orig_var_lval
                = mf_mark (build3 (COMPONENT_REF, TREE_TYPE (orig_var_type),
                                   struct_var, orig_var_type, NULL_TREE));
              init_assign_stmt = gimple_build_assign (orig_var_lval,
                                                      DECL_INITIAL (decl));
              gimple_set_location (init_assign_stmt, location);
            }
#endif

          if (gsi_end_p (initially_stmts))
            {
              // TODO handle this.
              if (!DECL_ARTIFICIAL (decl))
                warning (OPT_Wmudflap,
                         "mudflap cannot track %qE in stub function",
                         DECL_NAME (decl));
            }
          else
            {
#if 0
              // Insert the declaration initializer.
              if (DECL_INITIAL (decl) != NULL_TREE)
                gsi_insert_before (&initially_stmts, init_assign_stmt,
                                   GSI_SAME_STMT);
#endif
              //gsi_insert_before (&initially_stmts, register_fncall, GSI_SAME_STMT);
              gsi_insert_before (&initially_stmts, init_fncall_front,
                                 GSI_SAME_STMT);
              if (COMPLETE_TYPE_P (decl))
                gsi_insert_before (&initially_stmts, init_fncall_rear,
                                   GSI_SAME_STMT);

              /* Accumulate the FINALLY piece.  */
              //gimple_seq_add_stmt (&finally_stmts, unregister_fncall);
              gimple_seq_add_stmt (&finally_stmts, uninit_fncall_front);
              if (COMPLETE_TYPE_P (decl))
                gimple_seq_add_stmt (&finally_stmts, uninit_fncall_rear);
            }
          mf_mark (decl);
        }

      decl = DECL_CHAIN (decl);
    }

  /* Actually, (initially_stmts != NULL) <=> (finally_stmts != NULL).  */
  if (finally_stmts != NULL)
    {
      gimple stmt = gimple_build_try (seq, finally_stmts, GIMPLE_TRY_FINALLY);
      gimple_seq new_seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&new_seq, stmt);
      return new_seq;
    }
  else
    return seq;
}
gimple_stmt_iterator
gsi_start_phis (basic_block bb)
{
  return gsi_start (phi_nodes (bb));
}
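/* A minimal usage sketch (added for illustration, not part of the original):
   the typical loop a pass uses to visit every PHI node of a block through
   gsi_start_phis.  The dump call is only an example of per-PHI work.  */

static void
dump_phis_in_block (FILE *file, basic_block bb)
{
  gimple_stmt_iterator psi;

  for (psi = gsi_start_phis (bb); !gsi_end_p (psi); gsi_next (&psi))
    print_gimple_stmt (file, gsi_stmt (psi), 0, 0);
}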
static bool generate_memset_zero (gimple stmt, tree op0, tree nb_iter, gimple_stmt_iterator bsi) { tree addr_base; tree nb_bytes = NULL; bool res = false; gimple_seq stmts = NULL, stmt_list = NULL; gimple fn_call; tree mem, fndecl, fntype, fn; gimple_stmt_iterator i; struct data_reference *dr = XCNEW (struct data_reference); location_t loc = gimple_location (stmt); DR_STMT (dr) = stmt; DR_REF (dr) = op0; if (!dr_analyze_innermost (dr)) goto end; /* Test for a positive stride, iterating over every element. */ if (integer_zerop (fold_build2_loc (loc, MINUS_EXPR, integer_type_node, DR_STEP (dr), TYPE_SIZE_UNIT (TREE_TYPE (op0))))) { tree offset = fold_convert_loc (loc, sizetype, size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr))); addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (DR_BASE_ADDRESS (dr)), DR_BASE_ADDRESS (dr), offset); } /* Test for a negative stride, iterating over every element. */ else if (integer_zerop (fold_build2_loc (loc, PLUS_EXPR, integer_type_node, TYPE_SIZE_UNIT (TREE_TYPE (op0)), DR_STEP (dr)))) { nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list); addr_base = size_binop_loc (loc, PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr)); addr_base = fold_build2_loc (loc, MINUS_EXPR, sizetype, addr_base, fold_convert_loc (loc, sizetype, nb_bytes)); addr_base = force_gimple_operand (addr_base, &stmts, true, NULL); gimple_seq_add_seq (&stmt_list, stmts); addr_base = fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (DR_BASE_ADDRESS (dr)), DR_BASE_ADDRESS (dr), addr_base); } else goto end; mem = force_gimple_operand (addr_base, &stmts, true, NULL); gimple_seq_add_seq (&stmt_list, stmts); fndecl = implicit_built_in_decls [BUILT_IN_MEMSET]; fntype = TREE_TYPE (fndecl); fn = build1 (ADDR_EXPR, build_pointer_type (fntype), fndecl); if (!nb_bytes) nb_bytes = build_size_arg_loc (loc, nb_iter, op0, &stmt_list); fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes); gimple_seq_add_stmt (&stmt_list, fn_call); for (i = gsi_start (stmt_list); !gsi_end_p (i); gsi_next (&i)) { gimple s = gsi_stmt (i); update_stmt_if_modified (s); } gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING); res = true; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "generated memset zero\n"); end: free_data_ref (dr); return res; }
static unsigned int lower_function_body (void) { struct lower_data data; gimple_seq body = gimple_body (current_function_decl); gimple_seq lowered_body; gimple_stmt_iterator i; gimple bind; gimple x; /* The gimplifier should've left a body of exactly one statement, namely a GIMPLE_BIND. */ gcc_assert (gimple_seq_first (body) == gimple_seq_last (body) && gimple_code (gimple_seq_first_stmt (body)) == GIMPLE_BIND); memset (&data, 0, sizeof (data)); data.block = DECL_INITIAL (current_function_decl); BLOCK_SUBBLOCKS (data.block) = NULL_TREE; BLOCK_CHAIN (data.block) = NULL_TREE; TREE_ASM_WRITTEN (data.block) = 1; data.return_statements.create (8); bind = gimple_seq_first_stmt (body); lowered_body = NULL; gimple_seq_add_stmt (&lowered_body, bind); i = gsi_start (lowered_body); lower_gimple_bind (&i, &data); i = gsi_last (lowered_body); /* If the function falls off the end, we need a null return statement. If we've already got one in the return_statements vector, we don't need to do anything special. Otherwise build one by hand. */ if (gimple_seq_may_fallthru (lowered_body) && (data.return_statements.is_empty () || (gimple_return_retval (data.return_statements.last().stmt) != NULL))) { x = gimple_build_return (NULL); gimple_set_location (x, cfun->function_end_locus); gimple_set_block (x, DECL_INITIAL (current_function_decl)); gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); } /* If we lowered any return statements, emit the representative at the end of the function. */ while (!data.return_statements.is_empty ()) { return_statements_t t = data.return_statements.pop (); x = gimple_build_label (t.label); gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); gsi_insert_after (&i, t.stmt, GSI_CONTINUE_LINKING); } /* Once the old body has been lowered, replace it with the new lowered sequence. */ gimple_set_body (current_function_decl, lowered_body); gcc_assert (data.block == DECL_INITIAL (current_function_decl)); BLOCK_SUBBLOCKS (data.block) = blocks_nreverse (BLOCK_SUBBLOCKS (data.block)); clear_block_marks (data.block); data.return_statements.release (); return 0; }
static bool generate_memset_zero (gimple stmt, tree op0, tree nb_iter, gimple_stmt_iterator bsi) { tree t, addr_base; tree nb_bytes = NULL; bool res = false; gimple_seq stmts = NULL, stmt_list = NULL; gimple fn_call; tree mem, fndecl, fntype, fn; gimple_stmt_iterator i; ssa_op_iter iter; struct data_reference *dr = XCNEW (struct data_reference); DR_STMT (dr) = stmt; DR_REF (dr) = op0; if (!dr_analyze_innermost (dr)) goto end; /* Test for a positive stride, iterating over every element. */ if (integer_zerop (fold_build2 (MINUS_EXPR, integer_type_node, DR_STEP (dr), TYPE_SIZE_UNIT (TREE_TYPE (op0))))) { tree offset = fold_convert (sizetype, size_binop (PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr))); addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (DR_BASE_ADDRESS (dr)), DR_BASE_ADDRESS (dr), offset); } /* Test for a negative stride, iterating over every element. */ else if (integer_zerop (fold_build2 (PLUS_EXPR, integer_type_node, TYPE_SIZE_UNIT (TREE_TYPE (op0)), DR_STEP (dr)))) { nb_bytes = build_size_arg (nb_iter, op0, &stmt_list); addr_base = size_binop (PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr)); addr_base = fold_build2 (MINUS_EXPR, sizetype, addr_base, nb_bytes); addr_base = force_gimple_operand (addr_base, &stmts, true, NULL); gimple_seq_add_seq (&stmt_list, stmts); addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (DR_BASE_ADDRESS (dr)), DR_BASE_ADDRESS (dr), addr_base); } else goto end; mem = force_gimple_operand (addr_base, &stmts, true, NULL); gimple_seq_add_seq (&stmt_list, stmts); fndecl = implicit_built_in_decls [BUILT_IN_MEMSET]; fntype = TREE_TYPE (fndecl); fn = build1 (ADDR_EXPR, build_pointer_type (fntype), fndecl); if (!nb_bytes) nb_bytes = build_size_arg (nb_iter, op0, &stmt_list); fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes); gimple_seq_add_stmt (&stmt_list, fn_call); for (i = gsi_start (stmt_list); !gsi_end_p (i); gsi_next (&i)) { gimple s = gsi_stmt (i); update_stmt_if_modified (s); FOR_EACH_SSA_TREE_OPERAND (t, s, iter, SSA_OP_VIRTUAL_DEFS) { if (TREE_CODE (t) == SSA_NAME) t = SSA_NAME_VAR (t); mark_sym_for_renaming (t); } } /* Mark also the uses of the VDEFS of STMT to be renamed. */ FOR_EACH_SSA_TREE_OPERAND (t, stmt, iter, SSA_OP_VIRTUAL_DEFS) { if (TREE_CODE (t) == SSA_NAME) { gimple s; imm_use_iterator imm_iter; FOR_EACH_IMM_USE_STMT (s, imm_iter, t) update_stmt (s); t = SSA_NAME_VAR (t); } mark_sym_for_renaming (t); } gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING); res = true; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "generated memset zero\n"); todo |= TODO_rebuild_alias; end: free_data_ref (dr); return res; }
static unsigned int execute_trace () { gimple_seq body, body_bind_body, inner_cleanup, outer_cleanup; gimple inner_try, outer_try; tree record_type, func_start_decl, func_end_decl, var_decl, function_name_decl, constructor_clobber; gimple call_func_start; gimple_stmt_iterator gsi; // build record type record_type = build_type (); // build start & end function decl func_start_decl = build_function_decl ("__start_ctrace__", record_type); func_end_decl = build_function_decl ("__end_ctrace__", record_type); // init variables of current body body = gimple_body (current_function_decl); var_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL, get_identifier ("__ctrace_var__"), record_type); DECL_CONTEXT (var_decl) = current_function_decl; TREE_ADDRESSABLE (var_decl) = 1; declare_vars (var_decl, body, false); TREE_USED (var_decl) = 1; // mimic __FUNCTION__ builtin. function_name_decl = make_fname_decl (); declare_vars (function_name_decl, body, false); // construct inner try // init calls call_func_start = gimple_build_call ( func_start_decl, 2, build1 (ADDR_EXPR, build_pointer_type (record_type), var_decl), build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (function_name_decl)), function_name_decl)); // make inner clean up inner_cleanup = gimple_build_call ( func_end_decl, 2, build1 (ADDR_EXPR, build_pointer_type (record_type), var_decl), build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (function_name_decl)), function_name_decl)); // update inner try body_bind_body = gimple_bind_body (body); inner_try = gimple_build_try (body_bind_body, inner_cleanup, GIMPLE_TRY_FINALLY); gsi = gsi_start (inner_try); gsi_insert_before (&gsi, call_func_start, GSI_NEW_STMT); // construct outer try constructor_clobber = make_node (CONSTRUCTOR); TREE_THIS_VOLATILE (constructor_clobber) = 1; TREE_TYPE (constructor_clobber) = TREE_TYPE (var_decl); outer_cleanup = gimple_build_assign (var_decl, constructor_clobber); // update outer try outer_try = gimple_build_try (call_func_start, outer_cleanup, GIMPLE_TRY_FINALLY); // update body bind body gimple_bind_set_body (body, outer_try); if (dump_file) { dump_function_to_file (current_function_decl, dump_file, TDF_TREE | TDF_BLOCKS | TDF_VERBOSE); } // exit (0); return 0; }
gimple_stmt_iterator
gsi_start_edge (edge e)
{
  return gsi_start (PENDING_STMT (e));
}
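/* A minimal usage sketch (added for illustration, not part of the original):
   statements queued on an edge with gsi_insert_on_edge form the pending
   sequence that gsi_start_edge iterates over; they stay pending until
   gsi_commit_edge_inserts (used by the phiopt worker above) materializes
   them, splitting edges where necessary.  The fndecl parameter is assumed to
   be whatever call target the caller has chosen.  */

static void
queue_call_on_edge (edge e, tree fndecl)
{
  gimple stmt = gimple_build_call (fndecl, 0);
  gsi_insert_on_edge (e, stmt);
}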
/* Synthesize a CALL_EXPR and a TRY_FINALLY_EXPR, for this chain of _DECLs if appropriate. Arrange to call the __mf_register function now, and the __mf_unregister function later for each. Return the gimple sequence after synthesis. */ gimple_seq mx_register_decls (tree decl, gimple_seq seq, location_t location) { gimple_seq finally_stmts = NULL; gimple_stmt_iterator initially_stmts = gsi_start (seq); while (decl != NULL_TREE) { if (mf_decl_eligible_p (decl) /* Not already processed. */ && ! mf_marked_p (decl) /* Automatic variable. */ && ! DECL_EXTERNAL (decl) && ! TREE_STATIC (decl)) { tree size = NULL_TREE, variable_name; gimple unregister_fncall, register_fncall; tree unregister_fncall_param, register_fncall_param; /* Variable-sized objects should have sizes already been gimplified when we got here. */ size = fold_convert (size_type_node, TYPE_SIZE_UNIT (TREE_TYPE (decl))); gcc_assert (is_gimple_val (size)); unregister_fncall_param = mf_mark (build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (decl)), decl)); /* __mf_unregister (&VARIABLE, sizeof (VARIABLE), __MF_TYPE_STACK) */ unregister_fncall = gimple_build_call (mf_unregister_fndecl, 3, unregister_fncall_param, size, integer_three_node); variable_name = mf_varname_tree (decl); register_fncall_param = mf_mark (build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (decl)), decl)); /* __mf_register (&VARIABLE, sizeof (VARIABLE), __MF_TYPE_STACK, "name") */ register_fncall = gimple_build_call (mf_register_fndecl, 4, register_fncall_param, size, integer_three_node, variable_name); /* Accumulate the two calls. */ gimple_set_location (register_fncall, location); gimple_set_location (unregister_fncall, location); /* Add the __mf_register call at the current appending point. */ if (gsi_end_p (initially_stmts)) { if (!mf_artificial (decl)) warning (OPT_Wmudflap, "mudflap cannot track %qE in stub function", DECL_NAME (decl)); } else { gsi_insert_before (&initially_stmts, register_fncall, GSI_SAME_STMT); /* Accumulate the FINALLY piece. */ gimple_seq_add_stmt (&finally_stmts, unregister_fncall); } mf_mark (decl); } decl = DECL_CHAIN (decl); } /* Actually, (initially_stmts!=NULL) <=> (finally_stmts!=NULL) */ if (finally_stmts != NULL) { gimple stmt = gimple_build_try (seq, finally_stmts, GIMPLE_TRY_FINALLY); gimple_seq new_seq = NULL; gimple_seq_add_stmt (&new_seq, stmt); return new_seq; } else return seq; }
static unsigned int lower_function_body (void) { struct lower_data data; gimple_seq body = gimple_body (current_function_decl); gimple_seq lowered_body; gimple_stmt_iterator i; gimple *bind; gimple *x; /* The gimplifier should've left a body of exactly one statement, namely a GIMPLE_BIND. */ gcc_assert (gimple_seq_first (body) == gimple_seq_last (body) && gimple_code (gimple_seq_first_stmt (body)) == GIMPLE_BIND); memset (&data, 0, sizeof (data)); data.block = DECL_INITIAL (current_function_decl); BLOCK_SUBBLOCKS (data.block) = NULL_TREE; BLOCK_CHAIN (data.block) = NULL_TREE; TREE_ASM_WRITTEN (data.block) = 1; data.return_statements.create (8); bind = gimple_seq_first_stmt (body); lowered_body = NULL; gimple_seq_add_stmt (&lowered_body, bind); i = gsi_start (lowered_body); lower_gimple_bind (&i, &data); i = gsi_last (lowered_body); /* If we had begin stmt markers from e.g. PCH, but this compilation doesn't want them, lower_stmt will have cleaned them up; we can now clear the flag that indicates we had them. */ if (!MAY_HAVE_DEBUG_MARKER_STMTS && cfun->debug_nonbind_markers) { /* This counter needs not be exact, but before lowering it will most certainly be. */ gcc_assert (cfun->debug_marker_count == 0); cfun->debug_nonbind_markers = false; } /* If the function falls off the end, we need a null return statement. If we've already got one in the return_statements vector, we don't need to do anything special. Otherwise build one by hand. */ bool may_fallthru = gimple_seq_may_fallthru (lowered_body); if (may_fallthru && (data.return_statements.is_empty () || (gimple_return_retval (data.return_statements.last().stmt) != NULL))) { x = gimple_build_return (NULL); gimple_set_location (x, cfun->function_end_locus); gimple_set_block (x, DECL_INITIAL (current_function_decl)); gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); may_fallthru = false; } /* If we lowered any return statements, emit the representative at the end of the function. */ while (!data.return_statements.is_empty ()) { return_statements_t t = data.return_statements.pop (); x = gimple_build_label (t.label); gsi_insert_after (&i, x, GSI_CONTINUE_LINKING); gsi_insert_after (&i, t.stmt, GSI_CONTINUE_LINKING); if (may_fallthru) { /* Remove the line number from the representative return statement. It now fills in for the fallthru too. Failure to remove this will result in incorrect results for coverage analysis. */ gimple_set_location (t.stmt, UNKNOWN_LOCATION); may_fallthru = false; } } /* Once the old body has been lowered, replace it with the new lowered sequence. */ gimple_set_body (current_function_decl, lowered_body); gcc_assert (data.block == DECL_INITIAL (current_function_decl)); BLOCK_SUBBLOCKS (data.block) = blocks_nreverse (BLOCK_SUBBLOCKS (data.block)); clear_block_marks (data.block); data.return_statements.release (); return 0; }