/**
 * Block-walker: remove Bad block predecessors and shorten Phis.
 * Phi links must be up-to-date.
 */
static void block_remove_bads(ir_node *block)
{
	/* 1. Create a new block without Bad inputs */
	ir_graph  *irg     = get_irn_irg(block);
	const int  max     = get_Block_n_cfgpreds(block);
	ir_node  **new_in  = ALLOCAN(ir_node*, max);
	unsigned   new_max = 0;
	for (int i = 0; i < max; ++i) {
		ir_node *const block_pred = get_Block_cfgpred(block, i);
		if (!is_Bad(block_pred)) {
			new_in[new_max++] = block_pred;
		}
	}

	/* If the end block is unreachable, it might have zero predecessors. */
	if (new_max == 0) {
		ir_node *end_block = get_irg_end_block(irg);
		if (block == end_block) {
			set_irn_in(block, new_max, new_in);
			return;
		}
	}

	dbg_info  *dbgi         = get_irn_dbg_info(block);
	ir_node   *new_block    = new_rd_Block(dbgi, irg, new_max, new_in);
	ir_entity *block_entity = get_Block_entity(block);
	set_Block_entity(new_block, block_entity);

	/* 2. Remove inputs on Phis where the block input is Bad. */
	for (ir_node *phi = get_Block_phis(block), *next; phi != NULL;
	     phi = next) {
		next = get_Phi_next(phi);
		assert(get_irn_arity(phi) == max);
		unsigned j = 0;
		foreach_irn_in(phi, i, pred) {
			ir_node *const block_pred = get_Block_cfgpred(block, i);
			if (!is_Bad(block_pred)) {
				new_in[j++] = pred;
			}
		}
		assert(j == new_max);

		/* shortcut if only 1 Phi input is left */
		if (new_max == 1) {
			ir_node *new_node = new_in[0];
			/* can happen inside unreachable endless loops */
			if (new_node == phi)
				return;
			if (get_Phi_loop(phi))
				remove_keep_alive(phi);
			exchange(phi, new_node);
		} else {
			set_irn_in(phi, new_max, new_in);
		}
	}

	/* finally replace the old block with the slimmed-down one */
	exchange(block, new_block);
}
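/*
 * A minimal driver sketch (not part of the original source): since
 * block_remove_bads() requires up-to-date Phi links, a full pass would
 * first rebuild the per-block Phi lists and then walk all blocks.  The
 * two helper callbacks below are illustrative, written against the same
 * Phi-list accessors used above; the real pass may organize this
 * differently.
 */
static void clear_phi_list(ir_node *node, void *env)
{
	(void)env;
	if (is_Block(node))
		set_Block_phis(node, NULL); /* reset the list head */
}

static void collect_phi(ir_node *node, void *env)
{
	(void)env;
	if (is_Phi(node))
		add_Block_phi(get_nodes_block(node), node);
}

static void walk_block_remove_bads(ir_node *block, void *env)
{
	(void)env;
	block_remove_bads(block);
}

static void remove_bads_sketch(ir_graph *irg)
{
	/* 1. make the Phi links up-to-date */
	irg_walk_graph(irg, clear_phi_list, collect_phi, NULL);
	/* 2. rewrite every block */
	irg_block_walk_graph(irg, NULL, walk_block_remove_bads, NULL);
}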
ir_node *be_complete_Phi(ir_node *const phi, unsigned const n_ins,
                         ir_node **const ins)
{
	assert(is_Phi(phi) && get_Phi_n_preds(phi) == 0);

	ir_graph *const irg = get_irn_irg(phi);
	phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), n_ins);
	set_irn_in(phi, n_ins, ins);

	arch_register_req_t const **const in_reqs = be_allocate_in_reqs(irg, n_ins);
	arch_register_req_t const  *const req     = arch_get_irn_register_req(phi);
	for (unsigned i = 0; i < n_ins; ++i) {
		in_reqs[i] = req;
	}
	backend_info_t *const info = be_get_info(phi);
	info->in_reqs = in_reqs;

	verify_new_node(phi);
	return phi;
}
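/*
 * Usage sketch (an assumption, not from the original source): in the
 * backend, a Phi whose operands are not yet known is created as a 0-input
 * placeholder and completed later, once every predecessor value exists --
 * possibly including the Phi itself on a loop backedge.  be_new_Phi0() is
 * assumed here as the matching placeholder constructor.
 */
static ir_node *build_phi_deferred(ir_node *block, ir_mode *mode,
                                   arch_register_req_t const *req,
                                   unsigned n_preds, ir_node **pred_values)
{
	ir_node *const phi = be_new_Phi0(block, mode, req); /* assumed helper */
	/* ... compute pred_values[0..n_preds-1]; entries may refer to phi ... */
	return be_complete_Phi(phi, n_preds, pred_values);
}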
/**
 * Computes the predecessors for the real Phi node, and then
 * allocates and returns this node.  The routine called to allocate the
 * node might optimize it away and return a real value.
 * This function must be called with an in-array of proper size.
 */
static ir_node *set_phi_arguments(ir_node *phi, int pos)
{
	ir_node  *block = get_nodes_block(phi);
	ir_graph *irg   = get_irn_irg(block);
	int       arity = get_irn_arity(block);
	ir_node **in    = ALLOCAN(ir_node*, arity);
	ir_mode  *mode  = get_irn_mode(phi);

	/* This loop goes to all predecessor blocks of the block the Phi node
	 * is in and there finds the operands of the Phi node by calling
	 * get_r_value_internal. */
	for (int i = 0; i < arity; ++i) {
		ir_node *cfgpred = get_Block_cfgpred_block(block, i);
		ir_node *value;
		if (cfgpred == NULL) {
			value = new_r_Bad(irg, mode);
		} else {
			value = get_r_value_internal(cfgpred, pos, mode);
		}
		in[i] = value;
	}

	phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), arity);
	set_irn_in(phi, arity, in);

	verify_new_node(phi);

	try_remove_unnecessary_phi(phi);

	/* To solve the problem of (potentially) endless loops being observable
	 * behaviour we add a keep-alive edge to all PhiM nodes. */
	if (mode == mode_M && !is_Id(phi)) {
		phi->attr.phi.loop = true;
		keep_alive(phi);
	}

	return phi;
}
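/*
 * Frontend-level sketch (not from the original source) of the mechanism
 * set_phi_arguments() serves: with libFirm's immature-block construction
 * API, reading a variable in a join block yields a placeholder Phi whose
 * operands are filled in once the block is matured and all control-flow
 * predecessors are known.  The variable slot number 0 is a hypothetical
 * example.
 */
static ir_node *read_var_in_join(ir_node *jmp_then, ir_node *jmp_else)
{
	ir_node *join = new_immBlock();
	add_immBlock_pred(join, jmp_then);
	add_immBlock_pred(join, jmp_else);
	set_cur_block(join);

	/* may create an incomplete Phi behind the scenes */
	ir_node *value = get_value(0, mode_Is);

	/* maturing completes pending Phis via set_phi_arguments() */
	mature_immBlock(join);
	return value;
}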
/**
 * lower 64bit conversions
 */
static void ia32_lower_conv64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbg       = get_irn_dbg_info(node);
	ir_node  *op        = get_Conv_op(node);
	ir_mode  *mode_from = get_irn_mode(op);
	ir_mode  *mode_to   = get_irn_mode(node);

	if (mode_is_float(mode_from) && get_mode_size_bits(mode_to) == 64
	    && get_mode_arithmetic(mode_to) == irma_twos_complement) {
		/* We have a Conv float -> long long here */
		ir_node *float_to_ll;
		ir_node *l_res;
		ir_node *h_res;
		if (mode_is_signed(mode)) {
			/* convert from float to signed 64bit */
			ir_node *block = get_nodes_block(node);
			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, block, op);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
			                   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
			                   pn_ia32_l_FloattoLL_res_high);
		} else {
			/* Convert from float to unsigned 64bit. */
			ir_graph  *irg      = get_irn_irg(node);
			ir_tarval *flt_tv   = new_tarval_from_str("9223372036854775808", 19, x86_mode_E);
			ir_node   *flt_corr = new_r_Const(irg, flt_tv);

			ir_node *lower_blk = part_block_dw(node);
			ir_node *upper_blk = get_nodes_block(node);
			set_dw_control_flow_changed();

			ir_node *opc  = new_rd_Conv(dbg, upper_blk, op, x86_mode_E);
			ir_node *cmp  = new_rd_Cmp(dbg, upper_blk, opc, flt_corr,
			                           ir_relation_less);
			ir_node *cond = new_rd_Cond(dbg, upper_blk, cmp);
			ir_node *in[] = {
				new_r_Proj(cond, mode_X, pn_Cond_true),
				new_r_Proj(cond, mode_X, pn_Cond_false)
			};
			ir_node *blk = new_r_Block(irg, 1, &in[1]);
			in[1] = new_r_Jmp(blk);

			set_irn_in(lower_blk, 2, in);

			/* create two Phis */
			ir_node *phi_in[] = {
				new_r_Const_null(irg, mode),
				new_r_Const_long(irg, mode, 0x80000000)
			};
			ir_node *int_phi = new_r_Phi(lower_blk, ARRAY_SIZE(phi_in),
			                             phi_in, mode);

			ir_node *fphi_in[] = {
				opc,
				new_rd_Sub(dbg, upper_blk, opc, flt_corr, x86_mode_E)
			};
			ir_node *flt_phi = new_r_Phi(lower_blk, ARRAY_SIZE(fphi_in),
			                             fphi_in, x86_mode_E);

			/* fix Phi links for next part_block() */
			if (is_Phi(int_phi))
				add_Block_phi(lower_blk, int_phi);
			if (is_Phi(flt_phi))
				add_Block_phi(lower_blk, flt_phi);

			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, lower_blk, flt_phi);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
			                   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
			                   pn_ia32_l_FloattoLL_res_high);
			h_res = new_rd_Add(dbg, lower_blk, h_res, int_phi, mode);

			/* move the call and its Projs to the lower block */
			set_nodes_block(node, lower_blk);
			for (ir_node *proj = (ir_node*)get_irn_link(node); proj != NULL;
			     proj = (ir_node*)get_irn_link(proj)) {
				set_nodes_block(proj, lower_blk);
			}
		}
		ir_set_dw_lowered(node, l_res, h_res);
	} else if (get_mode_size_bits(mode_from) == 64
	           && get_mode_arithmetic(mode_from) == irma_twos_complement
	           && mode_is_float(mode_to)) {
		/* We have a Conv long long -> float here */
		ir_node *op_low      = get_lowered_low(op);
		ir_node *op_high     = get_lowered_high(op);
		ir_node *block       = get_nodes_block(node);
		ir_node *ll_to_float = new_bd_ia32_l_LLtoFloat(dbg, block, op_high,
		                                               op_low, mode_to);

		exchange(node, ll_to_float);
	} else {
		ir_default_lower_dw_Conv(node, mode);
	}
}
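/*
 * For clarity, the float -> unsigned 64bit correction constructed above,
 * written out as plain C (an illustrative sketch only): x87 offers only a
 * *signed* 64bit conversion, so values >= 2^63 are shifted into signed
 * range before converting, and 2^63 is added back afterwards -- which is
 * exactly the 0x80000000 int_phi constant added to the high word h_res.
 */
static unsigned long long float_to_ull_sketch(long double x)
{
	const long double two_63 = 9223372036854775808.0L; /* 2^63, cf. flt_corr */
	if (x < two_63)
		return (unsigned long long)(long long)x; /* in signed range: direct */
	/* subtract 2^63, convert, then add 2^63 back into the high word */
	return (unsigned long long)(long long)(x - two_63) + 0x8000000000000000ULL;
}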
/*
 * Normalize the Returns of a graph by creating a new End block
 * with one Return(Phi).
 * This is the preferred input for the if-conversion.
 *
 * In pseudocode, it means:
 *
 * if (a)
 *   return b;
 * else
 *   return c;
 *
 * is transformed into
 *
 * if (a)
 *   res = b;
 * else
 *   res = c;
 * return res;
 */
void normalize_one_return(ir_graph *irg)
{
	ir_node   *endbl         = get_irg_end_block(irg);
	ir_entity *entity        = get_irg_entity(irg);
	ir_type   *type          = get_entity_type(entity);
	int        n_ret_vals    = get_method_n_ress(type) + 1;
	int        n_rets        = 0;
	bool       filter_dbgi   = false;
	dbg_info  *combined_dbgi = NULL;
	int        i, j, k, n, last_idx;
	ir_node  **in, **retvals, **endbl_in;
	ir_node   *block;

	/* check whether we have more than one Return */
	n = get_Block_n_cfgpreds(endbl);
	if (n <= 0) {
		/* The end block has no predecessors, we have an endless loop.
		 * In that case no Return exists. */
		confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
		add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
		return;
	}

	unsigned *const returns = rbitset_alloca(n);
	for (i = 0; i < n; ++i) {
		ir_node *node = get_Block_cfgpred(endbl, i);
		if (is_Return(node)) {
			dbg_info *dbgi = get_irn_dbg_info(node);
			if (dbgi != NULL && dbgi != combined_dbgi) {
				if (filter_dbgi) {
					combined_dbgi = NULL;
				} else {
					combined_dbgi = dbgi;
					filter_dbgi   = true;
				}
			}
			++n_rets;
			rbitset_set(returns, i);
		}
	}

	if (n_rets <= 1) {
		confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
		add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
		return;
	}

	in       = ALLOCAN(ir_node*, MAX(n_rets, n_ret_vals));
	retvals  = ALLOCAN(ir_node*, n_rets * n_ret_vals);
	endbl_in = ALLOCAN(ir_node*, n);
	last_idx = 0;
	for (j = i = 0; i < n; ++i) {
		ir_node *ret = get_Block_cfgpred(endbl, i);
		if (rbitset_is_set(returns, i)) {
			ir_node *block = get_nodes_block(ret);

			/* create a new Jmp for every Return and collect it in in[] */
			in[j] = new_r_Jmp(block);

			/* save the return values and shuffle them */
			for (k = 0; k < n_ret_vals; ++k)
				retvals[j + k*n_rets] = get_irn_n(ret, k);
			++j;
		} else {
			endbl_in[last_idx++] = ret;
		}
	}

	/* ok, create a new block with all the created Jmps as inputs */
	block = new_r_Block(irg, n_rets, in);

	/* now create the Phi nodes */
	for (j = i = 0; i < n_ret_vals; ++i, j += n_rets) {
		ir_mode *mode = get_irn_mode(retvals[j]);
		in[i] = new_r_Phi(block, n_rets, &retvals[j], mode);
	}

	endbl_in[last_idx++] = new_rd_Return(combined_dbgi, block, in[0],
	                                     n_ret_vals-1, &in[1]);
	set_irn_in(endbl, last_idx, endbl_in);

	/* invalidate analysis information:
	 * a new Block was added, so dominator, outs and loop are inconsistent,
	 * trouts and callee-state should be still valid */
	confirm_irg_properties(irg, IR_GRAPH_PROPERTY_NO_BADS
	                          | IR_GRAPH_PROPERTY_NO_TUPLES
	                          | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
	                          | IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
	                          | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
	add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
}
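/*
 * Usage sketch (not from the original source): a pass driver would run the
 * normalization on every graph of the program, e.g. right before
 * if-conversion, using the public irprog iteration API.
 */
static void normalize_all_returns(void)
{
	for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i)
		normalize_one_return(get_irp_irg(i));
}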