Example 1
/**
 * Check whether a Return can be moved one block upwards.
 *
 * In a block with a Return, all live nodes must be linked
 * with the Return, otherwise they are dead (because the Return leaves
 * the graph, so no more users of the other nodes can exist).
 *
 * We can move a Return if its predecessors are Phi nodes or
 * come from another block. In the latter case, it is always possible
 * to move the Return one block up, because the predecessor block must
 * dominate the Return block (SSA) and then it dominates the predecessor
 * blocks of the Return block as well.
 *
 * All predecessors of the Return block must be Jmps of course, or we
 * cannot move the Return up, so we insert new blocks where needed.
 */
static bool can_move_ret(ir_node *ret)
{
    ir_node *retbl = get_nodes_block(ret);
    int i, n = get_irn_arity(ret);

    for (i = 0; i < n; ++i) {
        ir_node *pred = get_irn_n(ret, i);

        if (! is_Phi(pred) && retbl == get_nodes_block(pred)) {
            /* first condition failed: found a non-Phi predecessor
             * that is in the Return block */
            return false;
        }
    }

    /* check that all predecessors are Jmps */
    n = get_Block_n_cfgpreds(retbl);
    /* we cannot move above a labeled block, as this might kill the block */
    if (n <= 1 || get_Block_entity(retbl) != NULL)
        return false;
    for (i = 0; i < n; ++i) {
        ir_node *pred = get_Block_cfgpred(retbl, i);

        pred = skip_Tuple(pred);
        if (! is_Jmp(pred) && !is_Bad(pred)) {
            /* simply place a new block here */
            ir_graph *irg  = get_irn_irg(retbl);
            ir_node *block = new_r_Block(irg, 1, &pred);
            ir_node *jmp   = new_r_Jmp(block);
            set_Block_cfgpred(retbl, i, jmp);
        }
    }
    return true;
}
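
A hypothetical driver sketch showing where such a predicate fits; find_movable_returns is an invented name, but the accessors are the same libFirm calls used in Example 3. It enumerates the Returns hanging off the end block and tests each one:

static void find_movable_returns(ir_graph *irg)
{
    ir_node *endbl = get_irg_end_block(irg);
    int      n     = get_Block_n_cfgpreds(endbl);

    for (int i = 0; i < n; ++i) {
        ir_node *pred = get_Block_cfgpred(endbl, i);

        if (is_Return(pred) && can_move_ret(pred)) {
            /* this Return may be moved one block upwards; note that
             * can_move_ret() has already rewired any non-Jmp
             * control-flow predecessors through freshly inserted
             * blocks */
        }
    }
}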
Example 2
/**
 * Lower 64-bit conversions.
 */
static void ia32_lower_conv64(ir_node *node, ir_mode *mode)
{
	dbg_info  *dbg       = get_irn_dbg_info(node);
	ir_node   *op        = get_Conv_op(node);
	ir_mode   *mode_from = get_irn_mode(op);
	ir_mode   *mode_to   = get_irn_mode(node);

	if (mode_is_float(mode_from) && get_mode_size_bits(mode_to) == 64
	    && get_mode_arithmetic(mode_to) == irma_twos_complement) {
		/* We have a Conv float -> long long here */
		ir_node *float_to_ll;
		ir_node *l_res;
		ir_node *h_res;
		if (mode_is_signed(mode)) {
			/* convert from float to signed 64bit */
			ir_node *block = get_nodes_block(node);
			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, block, op);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
			                   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
							   pn_ia32_l_FloattoLL_res_high);
		} else {
			/* Convert from float to unsigned 64bit. */
			ir_graph  *irg = get_irn_irg(node);
			ir_tarval *flt_tv
				= new_tarval_from_str("9223372036854775808", 19, x86_mode_E);
			ir_node   *flt_corr  = new_r_Const(irg, flt_tv);

			ir_node *lower_blk = part_block_dw(node);
			ir_node *upper_blk = get_nodes_block(node);
			set_dw_control_flow_changed();

			ir_node *opc  = new_rd_Conv(dbg, upper_blk, op, x86_mode_E);
			ir_node *cmp  = new_rd_Cmp(dbg, upper_blk, opc, flt_corr,
			                           ir_relation_less);
			ir_node *cond = new_rd_Cond(dbg, upper_blk, cmp);
			ir_node *in[] = {
				new_r_Proj(cond, mode_X, pn_Cond_true),
				new_r_Proj(cond, mode_X, pn_Cond_false)
			};
			ir_node *blk   = new_r_Block(irg, 1, &in[1]);
			in[1] = new_r_Jmp(blk);

			set_irn_in(lower_blk, 2, in);

			/* create two Phis */
			ir_node *phi_in[] = {
				new_r_Const_null(irg, mode),
				new_r_Const_long(irg, mode, 0x80000000)
			};
			ir_node *int_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(phi_in), phi_in, mode);

			ir_node *fphi_in[] = {
				opc,
				new_rd_Sub(dbg, upper_blk, opc, flt_corr, x86_mode_E)
			};
			ir_node *flt_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(fphi_in), fphi_in,
				            x86_mode_E);

			/* fix Phi links for next part_block() */
			if (is_Phi(int_phi))
				add_Block_phi(lower_blk, int_phi);
			if (is_Phi(flt_phi))
				add_Block_phi(lower_blk, flt_phi);

			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, lower_blk, flt_phi);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
							   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
							   pn_ia32_l_FloattoLL_res_high);
			h_res = new_rd_Add(dbg, lower_blk, h_res, int_phi, mode);

			/* move the Conv and its Projs to the lower block */
			set_nodes_block(node, lower_blk);
			for (ir_node *proj = (ir_node*)get_irn_link(node); proj != NULL;
			     proj = (ir_node*)get_irn_link(proj)) {
				set_nodes_block(proj, lower_blk);
			}
		}
		ir_set_dw_lowered(node, l_res, h_res);
	} else if (get_mode_size_bits(mode_from) == 64
	           && get_mode_arithmetic(mode_from) == irma_twos_complement
	           && mode_is_float(mode_to)) {
		/* We have a Conv long long -> float here */
		ir_node *op_low  = get_lowered_low(op);
		ir_node *op_high = get_lowered_high(op);
		ir_node *block   = get_nodes_block(node);
		ir_node *ll_to_float
			= new_bd_ia32_l_LLtoFloat(dbg, block, op_high, op_low, mode_to);

		exchange(node, ll_to_float);
	} else {
		ir_default_lower_dw_Conv(node, mode);
	}
}
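
The unsigned branch above uses a classic trick: the hardware only offers float to signed 64-bit conversion, so values at or above 2^63 (the constant 9223372036854775808) are first shifted into the signed range, and the bias is re-added afterwards via the 0x80000000 correction in the high word. A minimal scalar sketch of the same arithmetic, independent of the libFirm API (float_to_u64_sketch is an invented name):

#include <stdint.h>

static uint64_t float_to_u64_sketch(double x)
{
	const double corr = 9223372036854775808.0; /* 2^63, the flt_corr above */

	if (x < corr) {
		/* value fits into the signed range: a plain signed
		 * conversion suffices (int_phi is the zero constant) */
		return (uint64_t)(int64_t)x;
	}
	/* subtract 2^63 before converting, then re-add it afterwards;
	 * adding 0x80000000 to the high 32-bit word is exactly +2^63 */
	return (uint64_t)(int64_t)(x - corr) + ((uint64_t)0x80000000 << 32);
}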
Example 3
/*
 * Normalize the Returns of a graph by creating a new End block
 * with a single Return whose operands are fed by Phi nodes.
 * This is the preferred input for if-conversion.
 *
 * In pseudocode, it means:
 *
 * if (a)
 *   return b;
 * else
 *   return c;
 *
 * is transformed into
 *
 * if (a)
 *   res = b;
 * else
 *   res = c;
 * return res;
 */
void normalize_one_return(ir_graph *irg)
{
    ir_node   *endbl         = get_irg_end_block(irg);
    ir_entity *entity        = get_irg_entity(irg);
    ir_type   *type          = get_entity_type(entity);
    int        n_ret_vals    = get_method_n_ress(type) + 1;
    int        n_rets        = 0;
    bool       filter_dbgi   = false;
    dbg_info  *combined_dbgi = NULL;
    int i, j, k, n, last_idx;
    ir_node **in, **retvals, **endbl_in;
    ir_node *block;

    /* check whether we have more than one Return */
    n = get_Block_n_cfgpreds(endbl);
    if (n <= 0) {
        /* The end block has no predecessors, so we have an endless
           loop. In that case, no Returns exist. */
        confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
        return;
    }

    unsigned *const returns = rbitset_alloca(n);
    for (i = 0; i < n; ++i) {
        ir_node *node = get_Block_cfgpred(endbl, i);

        if (is_Return(node)) {
            dbg_info *dbgi = get_irn_dbg_info(node);

            if (dbgi != NULL && dbgi != combined_dbgi) {
                if (filter_dbgi) {
                    combined_dbgi = NULL;
                } else {
                    combined_dbgi = dbgi;
                    filter_dbgi   = true;
                }
            }

            ++n_rets;
            rbitset_set(returns, i);
        }
    }

    if (n_rets <= 1) {
        confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
        return;
    }

    in       = ALLOCAN(ir_node*, MAX(n_rets, n_ret_vals));
    retvals  = ALLOCAN(ir_node*, n_rets * n_ret_vals);
    endbl_in = ALLOCAN(ir_node*, n);

    last_idx = 0;
    for (j = i = 0; i < n; ++i) {
        ir_node *ret = get_Block_cfgpred(endbl, i);

        if (rbitset_is_set(returns, i)) {
            ir_node *block = get_nodes_block(ret);

            /* create a new Jmp for every Return and collect it in in[] */
            in[j] = new_r_Jmp(block);

            /* save the return values transposed: operand k of the j-th
             * Return lands at retvals[j + k*n_rets] (see the sketch
             * after this function) */
            for (k = 0; k < n_ret_vals; ++k)
                retvals[j + k*n_rets] = get_irn_n(ret, k);

            ++j;
        } else {
            endbl_in[last_idx++] = ret;
        }
    }

    /* create a new block with all the created Jmps as predecessors */
    block = new_r_Block(irg, n_rets, in);

    /* now create the Phi nodes */
    for (j = i = 0; i < n_ret_vals; ++i, j += n_rets) {
        ir_mode *mode = get_irn_mode(retvals[j]);
        in[i] = new_r_Phi(block, n_rets, &retvals[j], mode);
    }

    endbl_in[last_idx++] = new_rd_Return(combined_dbgi, block, in[0], n_ret_vals-1, &in[1]);

    set_irn_in(endbl, last_idx, endbl_in);

    /* invalidate analysis information:
     * a new Block was added, so dominator, outs and loop are inconsistent,
     * trouts and callee-state should be still valid */
    confirm_irg_properties(irg,
                           IR_GRAPH_PROPERTY_NO_BADS
                           | IR_GRAPH_PROPERTY_NO_TUPLES
                           | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
                           | IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
                           | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
    add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
}
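
The transposed retvals layout built above is what lets each Phi be constructed from a contiguous slice. A minimal illustration, assuming two Returns (n_rets = 2) that each carry a memory operand plus one result value (n_ret_vals = 2):

/* retvals layout for n_rets = 2, n_ret_vals = 2 (memory + one result):
 *
 *   retvals[0] = M   of Return 0     retvals[1] = M   of Return 1
 *   retvals[2] = res of Return 0     retvals[3] = res of Return 1
 *
 * The Phi for operand k is built over &retvals[k * n_rets], i.e. over
 * exactly the n_rets values belonging to that operand. */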
Example 4
static ir_node *create_cond_set(ir_node *cond_value, ir_mode *dest_mode)
{
	ir_node *lower_block = part_block_edges(cond_value);
	ir_node *upper_block = get_nodes_block(cond_value);
	foreach_out_edge_safe(upper_block, edge) {
		/* The cached nodes might belong to the lower block, so we have
		 * to clear the cache for moved nodes to avoid dominance problems. */
		ir_node *node = get_edge_src_irn(edge);
		set_irn_link(node, NULL);
	}
	ir_graph *irg         = get_irn_irg(cond_value);
	ir_node  *cond        = new_r_Cond(upper_block, cond_value);
	ir_node  *proj_true   = new_r_Proj(cond, mode_X, pn_Cond_true);
	ir_node  *proj_false  = new_r_Proj(cond, mode_X, pn_Cond_false);
	ir_node  *in_true[1]  = { proj_true };
	ir_node  *in_false[1] = { proj_false };
	ir_node  *true_block  = new_r_Block(irg, ARRAY_SIZE(in_true), in_true);
	ir_node  *false_block = new_r_Block(irg, ARRAY_SIZE(in_false), in_false);
	ir_node  *true_jmp    = new_r_Jmp(true_block);
	ir_node  *false_jmp   = new_r_Jmp(false_block);
	ir_node  *lower_in[2] = { true_jmp, false_jmp };
	ir_node  *one         = new_r_Const_one(irg, dest_mode);
	ir_node  *zero        = new_r_Const_null(irg, dest_mode);
	ir_node  *phi_in[2]   = { one, zero };

	set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
	ir_node *phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in, dest_mode);

	return phi;
}
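
For orientation, a sketch of the control flow the snippet builds: the boolean cond_value is materialized as a constant of dest_mode by routing control through a diamond whose join block selects between one and zero.

/* Control flow constructed above (sketch):
 *
 *             upper_block
 *          Cond(cond_value)
 *           /            \
 *     true_block      false_block
 *        Jmp              Jmp
 *           \            /
 *            lower_block
 *       phi = Phi(one, zero)      1 if cond_value is true, else 0
 */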

static ir_node *lower_node(ir_node *node)