/**
 * Copies a node into a new irg. The Ins of the new node still point to
 * the predecessors in the old irg; n->link is set to the new node.
 *
 * @param n   the node to be copied
 * @param irg the new irg
 *
 * Standard nodes that are fixed per graph (Start, End, NoMem, the
 * start/end blocks and the frame/initial-mem/args Projs) are NOT
 * duplicated; the corresponding pre-existing nodes of the new irg are
 * used instead. Note further, that the newly created nodes have no block.
 */
static void copy_irn_to_irg(ir_node *n, ir_graph *irg)
{
	ir_node *copy = NULL;

	/* Map graph-fixed standard nodes onto their counterparts in irg. */
	switch (get_irn_opcode(n)) {
	case iro_NoMem:
		copy = get_irg_no_mem(irg);
		break;

	case iro_Block: {
		ir_graph *src_irg = get_irn_irg(n);
		if (n == get_irg_start_block(src_irg))
			copy = get_irg_start_block(irg);
		else if (n == get_irg_end_block(src_irg))
			copy = get_irg_end_block(irg);
		break;
	}

	case iro_Start:
		copy = get_irg_start(irg);
		break;

	case iro_End:
		copy = get_irg_end(irg);
		break;

	case iro_Proj: {
		/* frame, initial memory and args are anchored Projs of Start */
		ir_graph *src_irg = get_irn_irg(n);
		if (n == get_irg_frame(src_irg))
			copy = get_irg_frame(irg);
		else if (n == get_irg_initial_mem(src_irg))
			copy = get_irg_initial_mem(irg);
		else if (n == get_irg_args(src_irg))
			copy = get_irg_args(irg);
		break;
	}

	default:
		break;
	}

	if (copy == NULL) {
		/* An ordinary node: create a fresh copy. The block is left NULL
		   here and set later. */
		copy = new_ir_node(get_irn_dbg_info(n), irg,
		                   NULL, /* no block yet, will be set later */
		                   get_irn_op(n),
		                   get_irn_mode(n),
		                   get_irn_arity(n),
		                   get_irn_in(n));
		/* Copy the attributes. These might point to additional data. If
		   this was allocated on the old obstack the pointers now are
		   dangling. This frees e.g. the memory of the graph_arr
		   allocated in new_immBlock. */
		copy_node_attr(irg, n, copy);
	}

	set_irn_link(n, copy);
}
/**
 * Computes the control-dependence information for @p irg.
 *
 * Any previous cdep information is discarded first; a fresh obstack
 * and block->cdep map are set up, then the graph is walked block-wise.
 */
void compute_cdep(ir_graph *irg)
{
	free_cdep(irg);
	cdep_data = XMALLOC(cdep_info);
	obstack_init(&cdep_data->obst);
	cdep_data->cdep_map = pmap_create();

	assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_POSTDOMINANCE);

	/* Temporarily patch the post-dominator relation: pretend the end
	   block immediately post-dominates the start block, because Firm
	   does NOT add the phantom edge from Start to End. */
	ir_node *const start       = get_irg_start_block(irg);
	ir_node *const end         = get_irg_end_block(irg);
	ir_node *const saved_ipdom = get_Block_ipostdom(start);
	set_Block_ipostdom(start, end);

	irg_block_walk_graph(irg, cdep_pre, NULL, NULL);

	(void) cdep_edge_hook;

	/* undo the temporary patch of the post-dominator relation */
	set_Block_ipostdom(start, saved_ipdom);
}
/**
 * Creates a deep copy of @p irg in a freshly allocated graph.
 *
 * All nodes are duplicated via an anchor walk; afterwards the anchored
 * nodes (end block, end, start block, no_mem, start, initial memory)
 * of the copy are rewired to the corresponding duplicated nodes.
 *
 * @param irg the graph to duplicate
 * @return the newly allocated copy
 */
ir_graph *create_irg_copy(ir_graph *irg)
{
	ir_graph *copy = alloc_graph();
	copy->irg_pinned_state = irg->irg_pinned_state;

	/* clone the frame type here for safety */
	irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
	copy->frame_type = clone_frame_type(irg->frame_type);

	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);

	/* duplicate every node of irg into the new graph */
	irg_walk_anchors(irg, copy_all_nodes, rewire, copy);

	/* rewire the Anchor node itself */
	copy->anchor = get_new_node(irg->anchor);

	/* rewire the end side of the graph */
	set_irg_end_block (copy, get_new_node(get_irg_end_block(irg)));
	set_irg_end       (copy, get_new_node(get_irg_end(irg)));

	/* rewire the start side of the graph */
	set_irg_start_block(copy, get_new_node(get_irg_start_block(irg)));
	set_irg_no_mem     (copy, get_new_node(get_irg_no_mem(irg)));
	set_irg_start      (copy, get_new_node(get_irg_start(irg)));

	/* Proj results of the start node */
	set_irg_initial_mem(copy, get_new_node(get_irg_initial_mem(irg)));

	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
	irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);

	return copy;
}
void eh_end_method(void) { assert (! top->prev); // the explicit stuff is gone, we have the default handler if (top->used) { mature_immBlock(top->handler_header_block); assert (top->cur_block); // would fail if front end adds an catch all handler to the default handler ir_node *saved_block = get_cur_block(); set_cur_block(top->cur_block); ir_node *cur_mem = get_store(); ir_node *raise = new_Raise(cur_mem, top->exception_object); ir_node *proj = new_Proj(raise, mode_X, pn_Raise_X); cur_mem = new_Proj(raise, mode_M, pn_Raise_M); set_store(cur_mem); ir_node *end_block = get_irg_end_block(get_current_ir_graph()); add_immBlock_pred(end_block, proj); set_cur_block(saved_block); } obstack_free(&lpads, top); top = NULL; }
/**
 * Block-walker, remove Bad block predecessors and shorten Phis.
 * Phi links must be up-to-date.
 *
 * NOTE(review): this span shows only the beginning of the walker; the
 * function continues past the visible source (presumably exchanging the
 * old block for the new one created below — confirm in the full file).
 */
static void block_remove_bads(ir_node *block)
{
	/* 1. Create a new block without Bad inputs */
	ir_graph *irg = get_irn_irg(block);
	const int max = get_Block_n_cfgpreds(block);
	ir_node **new_in = ALLOCAN(ir_node*, max);
	unsigned new_max = 0;

	/* collect the control-flow predecessors that are not Bad */
	for (int i = 0; i < max; ++i) {
		ir_node *const block_pred = get_Block_cfgpred(block, i);
		if (!is_Bad(block_pred)) {
			new_in[new_max++] = block_pred;
		}
	}

	/* If the end block is unreachable, it might have zero predecessors. */
	if (new_max == 0) {
		ir_node *end_block = get_irg_end_block(irg);
		if (block == end_block) {
			/* keep the end block itself, just drop its Bad inputs */
			set_irn_in(block, new_max, new_in);
			return;
		}
	}

	/* build the replacement block, carrying over debug info and entity */
	dbg_info *dbgi = get_irn_dbg_info(block);
	ir_node *new_block = new_rd_Block(dbgi, irg, new_max, new_in);
	ir_entity *block_entity = get_Block_entity(block);
	set_Block_entity(new_block, block_entity);

	/* 2. Remove inputs on Phis, where the block input is Bad. */
	for (ir_node *phi = get_Block_phis(block), *next; phi != NULL; phi = next) {
		next = get_Phi_next(phi);
		assert(get_irn_arity(phi) == max);
		unsigned j = 0;
		/* keep exactly the Phi operands whose CFG predecessor survived */
		foreach_irn_in(phi, i, pred) {
			ir_node *const block_pred = get_Block_cfgpred(block, i);
			if (!is_Bad(block_pred)) {
				new_in[j++] = pred;
			}
		}
		assert(j == new_max);

		/* shortcut if only 1 phi input is left */
		if (new_max == 1) {
			ir_node *new_node = new_in[0];
			/* can happen inside unreachable endless loops */
			if (new_node == phi)
				return;
			if (get_Phi_loop(phi))
				remove_keep_alive(phi);
			exchange(phi, new_node);
		} else {
			set_irn_in(phi, new_max, new_in);
		}
	}
/*
 * Normalize the Returns of a graph by moving
 * the Returns upwards as much as possible.
 * This might be preferred for code generation.
 *
 * In pseudocode, it means:
 *
 * if (a)
 *   res = b;
 * else
 *   res = c;
 * return res;
 *
 * is transformed into
 *
 * if (a)
 *   return b;
 * else
 *   return c;
 *
 * NOTE(review): only the first phase (classifying the Returns) is
 * visible in this span; the function continues past the visible
 * source, where the variables declared but unused here (j, n_ret_vals,
 * in, end) are presumably consumed.
 */
void normalize_n_returns(ir_graph *irg)
{
	int i, j, n;
	ir_node *list = NULL;    /* chain of movable Returns (linked via irn link) */
	ir_node *final = NULL;   /* chain of Returns that stay where they are */
	unsigned n_rets = 0;     /* length of 'list' */
	unsigned n_finals = 0;   /* length of 'final' */
	ir_node *endbl = get_irg_end_block(irg);
	int n_ret_vals;
	ir_node **in;
	ir_node *end;

	/*
	 * First, link all returns:
	 * These must be predecessors of the endblock.
	 * Place Returns that can be moved on list, all others
	 * on final.
	 */
	n = get_Block_n_cfgpreds(endbl);
	for (i = 0; i < n; ++i) {
		ir_node *ret = get_Block_cfgpred(endbl, i);

		if (is_Bad(ret)) {
			continue;
		} else if (is_Return(ret) && can_move_ret(ret)) {
			/*
			 * Ok, all conditions met, we can move this Return, put it
			 * on our work list.
			 */
			set_irn_link(ret, list);
			list = ret;
			++n_rets;
		} else {
			/* Put all nodes that are not changed on the final list. */
			set_irn_link(ret, final);
			final = ret;
			++n_finals;
		}
	}
ir_graph *new_const_code_irg(void) { ir_graph *const res = new_r_ir_graph(NULL, 0); mature_immBlock(get_irg_end_block(res)); /* There is no Start node in the const_code_irg */ set_irg_start(res, new_r_Bad(res, mode_T)); set_irg_frame(res, new_r_Bad(res, mode_BAD)); set_irg_args(res, new_r_Bad(res, mode_T)); set_irg_initial_mem(res, new_r_Bad(res, mode_M)); set_r_store(res, get_irg_no_mem(res)); /* Set the visited flag high enough that the blocks will never be * visited. */ ir_node *const body_block = get_r_cur_block(res); set_irn_visited(body_block, -1); set_Block_block_visited(body_block, -1); ir_node *const start_block = get_irg_start_block(res); set_Block_block_visited(start_block, -1); set_irn_visited(start_block, -1); return res; }
#include "irtools.h"
#include "util.h"

/**
 * Block-walker: adds the visited block to a flexible array.
 *
 * @param block the block just visited
 * @param data  pointer to the flexible array (ir_node**), passed as void*
 */
static void add_to_postorder(ir_node *block, void *data)
{
	ir_node ***list = (ir_node***) data;
	ARR_APP1(ir_node*, *list, block);
}

/**
 * Returns the blocks of @p irg in CFG post-order as a flexible array.
 *
 * The caller owns the returned array (allocated with NEW_ARR_F).
 */
ir_node **be_get_cfgpostorder(ir_graph *irg)
{
	ir_node **list = NEW_ARR_F(ir_node*, 0);
	ir_node *end_block = get_irg_end_block(irg);

	/* end block may be unreachable in case of endless loops */
	if (get_Block_n_cfgpreds(end_block) == 0)
		ARR_APP1(ir_node*, list, end_block);

	/* walk blocks */
	irg_block_edges_walk(get_irg_start_block(irg), NULL, add_to_postorder,
	                     &list);
	return list;
}

/* NOTE(review): definition truncated in this span — body continues past
 * the visible source. */
static int cmp_node_nr(const void *a, const void *b)
{
	ir_node **p1 = (ir_node**)a;
/*
 * Normalize the Returns of a graph by creating a new End block
 * with One Return(Phi).
 * This is the preferred input for the if-conversion.
 *
 * In pseudocode, it means:
 *
 * if (a)
 *   return b;
 * else
 *   return c;
 *
 * is transformed into
 *
 * if (a)
 *   res = b;
 * else
 *   res = c;
 * return res;
 */
void normalize_one_return(ir_graph *irg)
{
	ir_node *endbl = get_irg_end_block(irg);
	ir_entity *entity = get_irg_entity(irg);
	ir_type *type = get_entity_type(entity);
	/* +1: a Return also has a memory input besides the result values */
	int n_ret_vals = get_method_n_ress(type) + 1;
	int n_rets = 0;
	bool filter_dbgi = false;
	/* debug info shared by all Returns, if they agree; NULL otherwise */
	dbg_info *combined_dbgi = NULL;
	int i, j, k, n, last_idx;
	ir_node **in, **retvals, **endbl_in;
	ir_node *block; /* NOTE(review): shadowed by a loop-local 'block' below */

	/* look, if we have more than one return */
	n = get_Block_n_cfgpreds(endbl);
	if (n <= 0) {
		/* The end block has no predecessors, we have an endless
		   loop. In that case, no returns exists. */
		confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
		add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
		return;
	}

	/* mark which end-block predecessors are Returns */
	unsigned *const returns = rbitset_alloca(n);
	for (i = 0; i < n; ++i) {
		ir_node *node = get_Block_cfgpred(endbl, i);

		if (is_Return(node)) {
			dbg_info *dbgi = get_irn_dbg_info(node);
			/* keep a common dbg_info only while all Returns agree on it */
			if (dbgi != NULL && dbgi != combined_dbgi) {
				if (filter_dbgi) {
					combined_dbgi = NULL;
				} else {
					combined_dbgi = dbgi;
					filter_dbgi = true;
				}
			}
			++n_rets;
			rbitset_set(returns, i);
		}
	}

	/* zero or one Return: nothing to merge */
	if (n_rets <= 1) {
		confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
		add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
		return;
	}

	/* 'in' is reused: first for the Jmps, later for the Return inputs */
	in = ALLOCAN(ir_node*, MAX(n_rets, n_ret_vals));
	/* retvals is a n_ret_vals x n_rets matrix stored column-major:
	   retvals[j + k*n_rets] is input k of the j-th Return */
	retvals = ALLOCAN(ir_node*, n_rets * n_ret_vals);
	endbl_in = ALLOCAN(ir_node*, n);

	last_idx = 0;
	for (j = i = 0; i < n; ++i) {
		ir_node *ret = get_Block_cfgpred(endbl, i);

		if (rbitset_is_set(returns, i)) {
			ir_node *block = get_nodes_block(ret);

			/* create a new Jmp for every Ret and place the in in */
			in[j] = new_r_Jmp(block);

			/* save the return values and shuffle them */
			for (k = 0; k < n_ret_vals; ++k)
				retvals[j + k*n_rets] = get_irn_n(ret, k);

			++j;
		} else {
			/* non-Return predecessors keep feeding the end block */
			endbl_in[last_idx++] = ret;
		}
	}

	/* ok, create a new block with all created in's */
	block = new_r_Block(irg, n_rets, in);

	/* now create the Phi nodes */
	for (j = i = 0; i < n_ret_vals; ++i, j += n_rets) {
		ir_mode *mode = get_irn_mode(retvals[j]);
		in[i] = new_r_Phi(block, n_rets, &retvals[j], mode);
	}

	/* in[0] is the memory Phi, in[1..] are the merged result values */
	endbl_in[last_idx++] = new_rd_Return(combined_dbgi, block, in[0],
	                                     n_ret_vals-1, &in[1]);

	set_irn_in(endbl, last_idx, endbl_in);

	/* invalidate analysis information:
	 * a new Block was added, so dominator, outs and loop are inconsistent,
	 * trouts and callee-state should be still valid */
	confirm_irg_properties(irg, IR_GRAPH_PROPERTY_NO_BADS
	                       | IR_GRAPH_PROPERTY_NO_TUPLES
	                       | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
	                       | IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
	                       | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
	add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
}
/**
 * Pre-walker for connecting DAGs and counting.
 *
 * Assigns each counted node to a DAG entry and merges the entries of a
 * node and its same-block predecessors, so that after the walk every
 * expression DAG of a block is represented by one (live) entry.
 */
static void connect_dags(ir_node *node, void *env)
{
	dag_env_t *dag_env = (dag_env_t*)env;
	int i, arity;
	ir_node *block;
	dag_entry_t *entry;
	ir_mode *mode;

	if (is_Block(node))
		return;

	block = get_nodes_block(node);

	/* ignore start end end blocks */
	ir_graph *const irg = get_Block_irg(block);
	if (block == get_irg_start_block(irg)
	    || block == get_irg_end_block(irg))
		return;

	/* ignore Phi nodes */
	if (is_Phi(node))
		return;

	if (dag_env->options & FIRMSTAT_ARGS_ARE_ROOTS && is_arg(node))
		return;

	mode = get_irn_mode(node);
	if (mode == mode_X || mode == mode_M) {
		/* do NOT count mode_X and mode_M nodes */
		return;
	}

	/* if this option is set, Loads are always leaves */
	if (dag_env->options & FIRMSTAT_LOAD_IS_LEAVE && is_Load(node))
		return;

	if (dag_env->options & FIRMSTAT_CALL_IS_LEAVE && is_Call(node))
		return;

	entry = get_irn_dag_entry(node);

	if (! entry) {
		/* found an unassigned node, maybe a new root */
		entry = new_dag_entry(dag_env, node);
	}

	/* put the predecessors into the same DAG as the current */
	for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
		ir_node *prev = get_irn_n(node, i);
		ir_mode *mode = get_irn_mode(prev);

		if (is_Phi(prev))
			continue;

		if (mode == mode_X || mode == mode_M)
			continue;

		/*
		 * copy constants if requested into the DAG's
		 * beware, do NOT add a link, as this will result in
		 * wrong intersections
		 */
		if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
			if (is_irn_constlike(prev)) {
				++entry->num_nodes;
				++entry->num_inner_nodes;
			}
		}

		/* only nodes from the same block goes into the DAG */
		if (get_nodes_block(prev) == block) {
			dag_entry_t *prev_entry = get_irn_dag_entry(prev);

			if (! prev_entry) {
				/* not assigned node, put it into the same DAG */
				set_irn_dag_entry(prev, entry);
				++entry->num_nodes;
				++entry->num_inner_nodes;
			} else {
				if (prev_entry == entry) {
					/* We found a node that is already assigned to this DAG.
					   This DAG is not a tree. */
					entry->is_tree = 0;
				} else {
					/* two DAGs intersect: copy the data to one of them
					   and kill the other */
					entry->num_roots       += prev_entry->num_roots;
					entry->num_nodes       += prev_entry->num_nodes;
					entry->num_inner_nodes += prev_entry->num_inner_nodes;
					entry->is_tree         &= prev_entry->is_tree;

					--dag_env->num_of_dags;

					/* the dead entry forwards to the surviving one
					   (union-find style link) */
					prev_entry->is_dead = 1;
					prev_entry->link    = entry;
				}
			}
		}
	}
}
/**
 * Post-walker to detect DAG roots that are referenced form other blocks
 * (i.e. predecessors that live in a different block than their user and
 * therefore form the root of a DAG there).
 */
static void find_dag_roots(ir_node *node, void *env)
{
	dag_env_t *dag_env = (dag_env_t*)env;
	int i, arity;
	dag_entry_t *entry;
	ir_node *block;

	if (is_Block(node))
		return;

	block = get_nodes_block(node);

	/* ignore start end end blocks */
	ir_graph *const irg = get_Block_irg(block);
	if (block == get_irg_start_block(irg)
	    || block == get_irg_end_block(irg))
		return;

	/* Phi nodes always references nodes from "other" block */
	if (is_Phi(node)) {
		if (get_irn_mode(node) != mode_M) {
			for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
				ir_node *prev = get_irn_n(node, i);

				if (is_Phi(prev))
					continue;

				if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
					if (is_irn_constlike(prev))
						continue;
				}

				entry = get_irn_dag_entry(prev);

				if (! entry) {
					/* found an unassigned node, a new root */
					/* NOTE(review): the lookup above is for 'prev', but
					   the new entry is created for 'node' — verify
					   against the intended semantics; 'prev' would seem
					   the natural argument here. */
					entry = new_dag_entry(dag_env, node);
					entry->is_ext_ref = 1;
				}
			}
		}
	} else {
		for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
			ir_node *prev = get_irn_n(node, i);
			ir_mode *mode = get_irn_mode(prev);

			if (mode == mode_X || mode == mode_M)
				continue;

			if (is_Phi(prev))
				continue;

			if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
				if (is_irn_constlike(prev))
					continue;
			}

			if (get_nodes_block(prev) != block) {
				/* The predecessor is from another block. It forms
				   a root. */
				entry = get_irn_dag_entry(prev);
				if (! entry) {
					/* found an unassigned node, a new root */
					/* NOTE(review): same 'node' vs 'prev' question as
					   in the Phi branch above — confirm upstream. */
					entry = new_dag_entry(dag_env, node);
					entry->is_ext_ref = 1;
				}
			}
		}
	}
}