Example #1
/* Duplicate a node into the transformed graph: transform its block and all
 * operands, then copy the attributes and dependencies. */
ir_node *be_duplicate_node(ir_node *node)
{
	ir_node  *block = be_transform_node(get_nodes_block(node));
	ir_graph *irg   = env.irg;
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_mode  *mode  = get_irn_mode(node);
	ir_op    *op    = get_irn_op(node);

	ir_node *new_node;
	int      arity = get_irn_arity(node);
	if (op->opar == oparity_dynamic) {
		new_node = new_ir_node(dbgi, irg, block, op, mode, -1, NULL);
		for (int i = 0; i < arity; ++i) {
			ir_node *in = get_irn_n(node, i);
			in = be_transform_node(in);
			add_irn_n(new_node, in);
		}
	} else {
		ir_node **ins = ALLOCAN(ir_node*, arity);
		for (int i = 0; i < arity; ++i) {
			ir_node *in = get_irn_n(node, i);
			ins[i] = be_transform_node(in);
		}

		new_node = new_ir_node(dbgi, irg, block, op, mode, arity, ins);
	}

	copy_node_attr(irg, node, new_node);
	be_duplicate_deps(node, new_node);

	new_node->node_nr = node->node_nr;
	return new_node;
}
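A registration sketch (hedged: be_set_transform_function is assumed here as the betranshlp hook for installing per-opcode transform handlers, and the opcode used is only illustrative). Backends can install be_duplicate_node for opcodes that need no special lowering:

	/* hedged sketch: keep Pin nodes unchanged during backend transformation;
	 * the opcode choice is an assumption for illustration */
	be_set_transform_function(op_Pin, be_duplicate_node);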
Example #2
/* Swap the two operands of a commutative ia32 node and remember the
 * permutation in its attributes. */
void ia32_swap_left_right(ir_node *node)
{
	ia32_attr_t *attr  = get_ia32_attr(node);
	ir_node     *left  = get_irn_n(node, n_ia32_binary_left);
	ir_node     *right = get_irn_n(node, n_ia32_binary_right);

	assert(is_ia32_commutative(node));
	attr->ins_permuted = !attr->ins_permuted;
	set_irn_n(node, n_ia32_binary_left,  right);
	set_irn_n(node, n_ia32_binary_right, left);
}
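A hypothetical caller sketch (not from the source; node, right and out_reg are assumed to be in scope): swapping pays off when the right operand already lives in the register picked for the result of a commutative operation.

	/* hedged sketch: swap the inputs if the right operand already occupies
	 * the output register; only ia32_swap_left_right, is_ia32_commutative
	 * and arch_get_irn_register are taken from the examples above */
	if (is_ia32_commutative(node) && arch_get_irn_register(right) == out_reg)
		ia32_swap_left_right(node);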
Example #3
static bool try_swap_inputs(ir_node *node)
{
	/* commutative operation, just switch the inputs */
	if (is_commutative(node)) {
		assert(get_amd64_attr_const(node)->op_mode == AMD64_OP_REG_REG);
		/* TODO: support Cmp input swapping */
		ir_node *in0 = get_irn_n(node, 0);
		ir_node *in1 = get_irn_n(node, 1);
		set_irn_n(node, 0, in1);
		set_irn_n(node, 1, in0);
		return true;
	}
	return false;
}
Example #4
	static void walk_topo_helper(ir_node* irn, const std::function<void (ir_node*, void*)>& walker, void* env)
	{
		if (irn_visited(irn))
			return;

		/* only break loops at phi/block nodes */
		const bool is_loop_breaker = is_Phi(irn) || is_Block(irn);

		if (is_loop_breaker)
			mark_irn_visited(irn);

		if (!is_Block(irn))
		{
			ir_node* block = get_nodes_block(irn);

			if (block != NULL)
				walk_topo_helper(block, walker, env);
		}

		for (int i = 0; i < get_irn_arity(irn); ++i)
		{
			ir_node* pred = get_irn_n(irn, i);
			walk_topo_helper(pred, walker, env);
		}

		if (is_loop_breaker || !irn_visited(irn))
			walker(irn, env);

		mark_irn_visited(irn);
	}
Example #5
/**
 * Check whether a Return can be moved one block upwards.
 *
 * In a block with a Return, all live nodes must be linked
 * with the Return, otherwise they are dead (because the Return leaves
 * the graph, so no more users of the other nodes can exist).
 *
 * We can move a Return if each of its predecessors either is a Phi node or
 * comes from another block. In the latter case, it is always possible
 * to move the Return one block up, because the predecessor block must
 * dominate the Return block (SSA) and then it dominates the predecessor
 * block of the Return block as well.
 *
 * All predecessors of the Return block must be Jmps of course, or we
 * cannot move it up, so we add blocks if needed.
 */
static bool can_move_ret(ir_node *ret)
{
    ir_node *retbl = get_nodes_block(ret);
    int i, n = get_irn_arity(ret);

    for (i = 0; i < n; ++i) {
        ir_node *pred = get_irn_n(ret, i);

        if (! is_Phi(pred) && retbl == get_nodes_block(pred)) {
            /* first condition failed: found a non-Phi predecessor
             * that is in the Return block */
            return false;
        }
    }

    /* check that all predecessors are Jmps */
    n = get_Block_n_cfgpreds(retbl);
    /* we cannot move above a labeled block, as this might kill the block */
    if (n <= 1 || get_Block_entity(retbl) != NULL)
        return false;
    for (i = 0; i < n; ++i) {
        ir_node *pred = get_Block_cfgpred(retbl, i);

        pred = skip_Tuple(pred);
        if (! is_Jmp(pred) && !is_Bad(pred)) {
            /* simply place a new block here */
            ir_graph *irg  = get_irn_irg(retbl);
            ir_node *block = new_r_Block(irg, 1, &pred);
            ir_node *jmp   = new_r_Jmp(block);
            set_Block_cfgpred(retbl, i, jmp);
        }
    }
    return true;
}
Example #6
/*
 * The 64-bit version of libgcc does not contain some builtin
 * functions for 32-bit values (__<builtin>si2) anymore.
 */
static void widen_builtin(ir_node *node)
{
	ir_type *mtp  = get_Builtin_type(node);
	ir_type *arg1 = get_method_param_type(mtp, 0);

	// Nothing to do if the argument size is at least the machine word size.
	if (get_type_size(arg1) >= ir_target_pointer_size())
		return;

	// Only touch builtins with no 32-bit version.
	ir_builtin_kind kind = get_Builtin_kind(node);
	if (kind != ir_bk_clz    &&
	    kind != ir_bk_ctz    &&
	    kind != ir_bk_ffs    &&
	    kind != ir_bk_parity &&
	    kind != ir_bk_popcount) {
		return;
	}

	ir_mode  *target_mode = get_reference_offset_mode(mode_P);
	dbg_info *dbgi        = get_irn_dbg_info(node);
	ir_node  *block       = get_nodes_block(node);
	ir_node  *op          = get_irn_n(node, n_Builtin_max + 1); /* the first parameter */

	ir_node *conv = new_rd_Conv(dbgi, block, op, target_mode);
	set_irn_n(node, n_Builtin_max + 1, conv);

	ir_type *new_arg1   = get_type_for_mode(target_mode);
	ir_type *new_result = get_method_res_type(mtp, 0);
	ir_type *new_type   = new_type_method(1, 1, false, cc_cdecl_set, mtp_no_property);
	set_method_param_type(new_type, 0, new_arg1);
	set_method_res_type(new_type, 0, new_result);
	set_Builtin_type(node, new_type);
}
Example #7
/**
 * Transforms a Sub to a Neg + Add, which subsequently allows swapping
 * of the inputs. The swapping is also (implicitly) done here.
 */
static void transform_sub_to_neg_add(ir_node *node,
                                     const arch_register_t *out_reg)
{
	ir_node  *block = get_nodes_block(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);

	ir_node *in1 = get_irn_n(node, 0);
	ir_node *in2 = get_irn_n(node, 1);

	const arch_register_t *in2_reg = arch_get_irn_register(in2);

	const amd64_binop_addr_attr_t *attr = get_amd64_binop_addr_attr(node);
	ir_node                       *add;
	unsigned                       pos;
	if (is_amd64_subs(node)) {
		unsigned bits = x86_bytes_from_size(attr->base.base.size) * 8;
		ir_tarval *tv = get_mode_one(amd64_mode_xmm);
		tv = tarval_shl_unsigned(tv, bits - 1);
		ir_entity *sign_bit_const = create_float_const_entity(tv);

		amd64_binop_addr_attr_t xor_attr = {
			.base = {
				.base = {
					.op_mode = AMD64_OP_REG_ADDR,
					.size    = X86_SIZE_64,
				},
			},
		};
		init_lconst_addr(&xor_attr.base.addr, sign_bit_const);

		ir_node *xor_in[] = { in2 };
		ir_node *const xor = new_bd_amd64_xorp(dbgi, block, ARRAY_SIZE(xor_in), xor_in, amd64_xmm_reqs, &xor_attr);
		sched_add_before(node, xor);
		ir_node *const neg = be_new_Proj_reg(xor, pn_amd64_xorp_res, in2_reg);

		ir_node *in[] = { neg, in1 };
		add = new_bd_amd64_adds(dbgi, block, ARRAY_SIZE(in), in, amd64_xmm_xmm_reqs, attr);
		pos = pn_amd64_adds_res;
	} else {
Example #8
static void introduce_epilog(ir_node *ret)
{
	arch_register_t const *const sp_reg = &arm_registers[REG_SP];
	assert(arch_get_irn_register_req_in(ret, n_arm_Return_sp) == sp_reg->single_req);

	ir_node  *const sp         = get_irn_n(ret, n_arm_Return_sp);
	ir_node  *const block      = get_nodes_block(ret);
	ir_graph *const irg        = get_irn_irg(ret);
	ir_type  *const frame_type = get_irg_frame_type(irg);
	unsigned  const frame_size = get_type_size_bytes(frame_type);
	ir_node  *const incsp      = be_new_IncSP(sp_reg, block, sp, -frame_size, 0); /* undo the frame allocation */
	set_irn_n(ret, n_arm_Return_sp, incsp);
	sched_add_before(ret, incsp);
}
Example #9
ir_entity *detect_call(ir_node* call) {
	assert(is_Call(call));

	ir_node *callee = get_irn_n(call, 1); /* input 1 is the callee address */
	if (is_Address(callee)) {
		ir_entity *entity = get_Address_entity(callee);
		if (entity == gcj_init_entity) {
			assert(get_irn_arity(call) == 3); /* memory + callee + one argument */
			ir_node *arg = get_irn_n(call, 2);
			assert(is_Address(arg));
			ir_entity *rtti = get_Address_entity(arg);
			ir_type *klass = cpmap_find(&rtti2class, rtti);
			assert(klass);
			ir_entity *init_method = cpmap_find(&class2init, klass);
			//assert(init_method); // _Jv_InitClass calls can occur even though the class has no clinit
			return init_method;
		} // else if (entity == ...)

	} else
		assert(false);

	return NULL;
}
Example #10
int is_irn_const_expression(ir_node *n)
{
	/* we are in danger iff an exception can arise. TODO: be more precise;
	 * a Div, for instance, will NOT raise if its divisor != 0 */
	if (is_binop(n) && !is_fragile_op(n))
		return is_irn_const_expression(get_binop_left(n)) && is_irn_const_expression(get_binop_right(n));

	switch (get_irn_opcode(n)) {
	case iro_Const:
	case iro_SymConst:
	case iro_Unknown:
		return 1;
	case iro_Conv:
		return is_irn_const_expression(get_irn_n(n, 0));
	default:
		break;
	}
	return 0;
}
Example #11
void be_default_lower_va_arg(ir_node *const node, bool const compound_is_ptr,
                             unsigned const stack_param_align)
{
	ir_node  *block = get_nodes_block(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_graph *irg   = get_irn_irg(node);

	ir_type       *aptype   = get_method_res_type(get_Builtin_type(node), 0);
	ir_node *const ap       = get_irn_n(node, 1); /* the va_list argument */
	ir_node *const node_mem = get_Builtin_mem(node);

	ir_mode *apmode = get_type_mode(aptype);
	ir_node *res;
	ir_node *new_mem;
	if (apmode) {
		goto load;
	} else if (compound_is_ptr) {
		apmode = mode_P;
		aptype = get_type_for_mode(apmode);
load:;
		ir_node *const load = new_rd_Load(dbgi, block, node_mem, ap, apmode, aptype, cons_none);
		res     = new_r_Proj(load, apmode, pn_Load_res);
		new_mem = new_r_Proj(load, mode_M, pn_Load_M);
	} else {
		/* aptype has no associated mode, so it is represented as a pointer. */
		res     = ap;
		new_mem = node_mem;
	}

	unsigned const round_up    = round_up2(get_type_size(aptype),
	                                       stack_param_align);
	ir_mode *const offset_mode = get_reference_offset_mode(mode_P);
	ir_node *const offset      = new_r_Const_long(irg, offset_mode, round_up);
	ir_node *const new_ap      = new_rd_Add(dbgi, block, ap, offset);

	ir_node *const in[] = { new_mem, res, new_ap };
	turn_into_tuple(node, ARRAY_SIZE(in), in);
}
Example #12
static ir_node *transform_end(ir_node *node)
{
	/* end has to be duplicated manually because we need a dynamic in array */
	ir_graph *irg     = get_irn_irg(node);
	dbg_info *dbgi    = get_irn_dbg_info(node);
	ir_node  *block   = be_transform_node(get_nodes_block(node));
	ir_node  *new_end = new_ir_node(dbgi, irg, block, op_End, mode_X, -1, NULL);
	copy_node_attr(irg, node, new_end);
	be_duplicate_deps(node, new_end);

	set_irg_end(irg, new_end);

	/* do not transform the predecessors yet, to keep the pre-transform
	 * phase from visiting the whole graph */
	int arity = get_irn_arity(node);
	for (int i = 0; i < arity; ++i) {
		ir_node *in = get_irn_n(node, i);
		add_End_keepalive(new_end, in);
	}
	be_enqueue_preds(node);

	return new_end;
}
Example #13
/**
 * Emit a Compare with conditional branch.
 */
static void emit_amd64_Jcc(const ir_node *irn)
{
	const ir_node      *proj_true  = NULL;
	const ir_node      *proj_false = NULL;
	const ir_node      *block;
	const ir_node      *next_block;
	const char         *suffix;
	const amd64_attr_t *attr      = get_amd64_attr_const(irn);
	ir_relation         relation  = attr->ext.relation;
	ir_node            *op1       = get_irn_n(irn, 0);
	const amd64_attr_t *cmp_attr  = get_amd64_attr_const(op1);
	bool                is_signed = !cmp_attr->data.cmp_unsigned;

	assert(is_amd64_Cmp(op1));

	foreach_out_edge(irn, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		long nr = get_Proj_proj(proj);
		if (nr == pn_Cond_true) {
			proj_true = proj;
		} else {
			proj_false = proj;
		}
	}
Example #14
/* Like get_irn_n(), but returns NULL for the (nonexistent) block input
 * (n == -1) of a Block node instead of failing. */
static ir_node *get_irn_safe_n(const ir_node *node, int n)
{
	if (n == -1 && is_Block(node))
		return NULL;
	return get_irn_n(node, n);
}
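Usage sketch (hypothetical, not from the source): iterate over all inputs of an arbitrary node, including the block input at position -1, which Blocks do not have.

	/* hedged sketch: 'visit' stands for whatever should happen per input */
	for (int i = -1; i < get_irn_arity(node); ++i) {
		ir_node *in = get_irn_safe_n(node, i);
		if (in != NULL)
			visit(in);
	}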
Example #15
ir_node *be_get_IncSP_pred(ir_node *irn)
{
	assert(be_is_IncSP(irn));
	return get_irn_n(irn, n_be_IncSP_pred);
}
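A small helper one could derive from this accessor (a sketch under the assumption that consecutive stack adjustments form a plain be_IncSP chain; not part of the source):

	/* hedged sketch: walk a chain of be_IncSP nodes down to the value the
	 * first adjustment started from */
	static ir_node *skip_IncSP_chain(ir_node *sp)
	{
		while (be_is_IncSP(sp))
			sp = be_get_IncSP_pred(sp);
		return sp;
	}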
Example #16
ir_node *be_get_CopyKeep_op(const ir_node *cpy)
{
	return get_irn_n(cpy, n_be_CopyKeep_op);
}
Example #17
/*
 * Normalize the Returns of a graph by creating a new End block
 * with exactly one Return whose operands are Phis merging the
 * former return values.
 * This is the preferred input for if-conversion.
 *
 * In pseudocode, it means:
 *
 * if (a)
 *   return b;
 * else
 *   return c;
 *
 * is transformed into
 *
 * if (a)
 *   res = b;
 * else
 *   res = c;
 * return res;
 */
void normalize_one_return(ir_graph *irg)
{
    ir_node   *endbl         = get_irg_end_block(irg);
    ir_entity *entity        = get_irg_entity(irg);
    ir_type   *type          = get_entity_type(entity);
    int        n_ret_vals    = get_method_n_ress(type) + 1; /* results + memory */
    int        n_rets        = 0;
    bool       filter_dbgi   = false;
    dbg_info  *combined_dbgi = NULL;
    int i, j, k, n, last_idx;
    ir_node **in, **retvals, **endbl_in;
    ir_node *block;

    /* check whether we have more than one Return */
    n = get_Block_n_cfgpreds(endbl);
    if (n <= 0) {
        /* The end block has no predecessors, so we have an endless
           loop. In that case, no Return exists. */
        confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
        return;
    }

    unsigned *const returns = rbitset_alloca(n);
    for (i = 0; i < n; ++i) {
        ir_node *node = get_Block_cfgpred(endbl, i);

        if (is_Return(node)) {
            dbg_info *dbgi = get_irn_dbg_info(node);

            if (dbgi != NULL && dbgi != combined_dbgi) {
                if (filter_dbgi) {
                    combined_dbgi = NULL;
                } else {
                    combined_dbgi = dbgi;
                    filter_dbgi   = true;
                }
            }

            ++n_rets;
            rbitset_set(returns, i);
        }
    }

    if (n_rets <= 1) {
        confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
        return;
    }

    in       = ALLOCAN(ir_node*, MAX(n_rets, n_ret_vals));
    retvals  = ALLOCAN(ir_node*, n_rets * n_ret_vals);
    endbl_in = ALLOCAN(ir_node*, n);

    last_idx = 0;
    for (j = i = 0; i < n; ++i) {
        ir_node *ret = get_Block_cfgpred(endbl, i);

        if (rbitset_is_set(returns, i)) {
            ir_node *block = get_nodes_block(ret);

            /* create a new Jmp for every Return and place it in the in[] array */
            in[j] = new_r_Jmp(block);

            /* save the return values and shuffle them */
            for (k = 0; k < n_ret_vals; ++k)
                retvals[j + k*n_rets] = get_irn_n(ret, k);

            ++j;
        } else {
            endbl_in[last_idx++] = ret;
        }
    }

    /* ok, create a new block with all the created Jmps as predecessors */
    block = new_r_Block(irg, n_rets, in);

    /* now create the Phi nodes */
    for (j = i = 0; i < n_ret_vals; ++i, j += n_rets) {
        ir_mode *mode = get_irn_mode(retvals[j]);
        in[i] = new_r_Phi(block, n_rets, &retvals[j], mode);
    }

    endbl_in[last_idx++] = new_rd_Return(combined_dbgi, block, in[0], n_ret_vals-1, &in[1]);

    set_irn_in(endbl, last_idx, endbl_in);

    /* invalidate analysis information:
     * a new Block was added, so dominator, outs and loop are inconsistent,
     * trouts and callee-state should be still valid */
    confirm_irg_properties(irg,
                           IR_GRAPH_PROPERTY_NO_BADS
                           | IR_GRAPH_PROPERTY_NO_TUPLES
                           | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
                           | IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
                           | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
    add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
}
Example #18
const arch_register_t *arch_get_irn_register_in(const ir_node *node, int pos)
{
	ir_node *op = get_irn_n(node, pos);
	return arch_get_irn_register(op);
}
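For illustration, a predicate one might build on top of this accessor (a sketch, not from the source; it assumes the node has at least two register inputs):

	/* hedged sketch: do the first two inputs sit in the same register? */
	static bool same_input_registers(const ir_node *node)
	{
		return arch_get_irn_register_in(node, 0)
		    == arch_get_irn_register_in(node, 1);
	}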
Example #19
/**
 * Pre-walker for connecting DAGs and counting.
 */
static void connect_dags(ir_node *node, void *env)
{
	dag_env_t   *dag_env = (dag_env_t*)env;
	int         i, arity;
	ir_node     *block;
	dag_entry_t *entry;
	ir_mode     *mode;

	if (is_Block(node))
		return;

	block = get_nodes_block(node);

	/* ignore start and end blocks */
	ir_graph *const irg = get_Block_irg(block);
	if (block == get_irg_start_block(irg) || block == get_irg_end_block(irg))
		return;

	/* ignore Phi nodes */
	if (is_Phi(node))
		return;

	if (dag_env->options & FIRMSTAT_ARGS_ARE_ROOTS && is_arg(node))
		return;

	mode = get_irn_mode(node);
	if (mode == mode_X || mode == mode_M) {
		/* do NOT count mode_X and mode_M nodes */
		return;
	}

	/* if this option is set, Loads are always leaves */
	if (dag_env->options & FIRMSTAT_LOAD_IS_LEAVE && is_Load(node))
		return;

	if (dag_env->options & FIRMSTAT_CALL_IS_LEAVE && is_Call(node))
		return;

	entry = get_irn_dag_entry(node);

	if (! entry) {
		/* found an unassigned node, maybe a new root */
		entry = new_dag_entry(dag_env, node);
	}

	/* put the predecessors into the same DAG as the current */
	for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
		ir_node *prev = get_irn_n(node, i);
		ir_mode *mode = get_irn_mode(prev);

		if (is_Phi(prev))
			continue;

		if (mode == mode_X || mode == mode_M)
			continue;

		/*
		 * if requested, count constants as copied into the DAGs;
		 * beware: do NOT add a link, as this would result in
		 * wrong intersections
		 */
		if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
			if (is_irn_constlike(prev)) {
				++entry->num_nodes;
				++entry->num_inner_nodes;
			}
		}

		/* only nodes from the same block go into the DAG */
		if (get_nodes_block(prev) == block) {
			dag_entry_t *prev_entry = get_irn_dag_entry(prev);

			if (! prev_entry) {
				/* not assigned node, put it into the same DAG */
				set_irn_dag_entry(prev, entry);
				++entry->num_nodes;
				++entry->num_inner_nodes;
			} else {
				if (prev_entry == entry) {
					/* We found a node that is already assigned to this DAG.
					   This DAG is not a tree. */
					entry->is_tree = 0;
				} else {
					/* two DAGs intersect: copy the data to one of them
					   and kill the other */
					entry->num_roots       += prev_entry->num_roots;
					entry->num_nodes       += prev_entry->num_nodes;
					entry->num_inner_nodes += prev_entry->num_inner_nodes;
					entry->is_tree         &= prev_entry->is_tree;

					--dag_env->num_of_dags;

					prev_entry->is_dead = 1;
					prev_entry->link    = entry;
				}
			}
		}
	}
}
Example #20
/**
 * @return The type of the function replacing the given node.
 */
static ir_type *get_softfloat_type(const ir_node *n)
{
	ir_node *operand      = get_irn_n(n, 0); /* for Div this is the memory input; corrected below */
	ir_mode *operand_mode = get_irn_mode(operand);

	switch (get_irn_opcode(n)) {
	case iro_Div:
		operand_mode = get_irn_mode(get_Div_left(n));
		/* fall through */
	case iro_Add:
	case iro_Mul:
	case iro_Sub:
		if (operand_mode == mode_F)
			return binop_tp_f;
		else if (operand_mode == mode_D)
			return binop_tp_d;
		break;
	case iro_Cmp:
		if (operand_mode == mode_F)
			return cmp_tp_f;
		else if (operand_mode == mode_D)
			return cmp_tp_d;
		break;

	case iro_Conv: {
		ir_mode *const mode = get_irn_mode(n);
		if (operand_mode == mode_D) {
			if (mode == mode_F)
				return unop_tp_d_f;
			else if (get_mode_arithmetic(mode) == irma_twos_complement) {
				if (get_mode_size_bits(mode) <= 32)
					return mode_is_signed(mode) ? unop_tp_d_is : unop_tp_d_iu;
				else if (get_mode_size_bits(mode) == 64)
					return mode_is_signed(mode) ? unop_tp_d_ls : unop_tp_d_lu;
			}
		} else if (operand_mode == mode_F) {
			if (mode == mode_D)
				return unop_tp_f_d;
			else if (get_mode_arithmetic(mode) == irma_twos_complement) {
				if (get_mode_size_bits(mode) <= 32)
					return mode_is_signed(mode) ? unop_tp_f_is : unop_tp_f_iu;
				else if (get_mode_size_bits(mode) == 64)
					return mode_is_signed(mode) ? unop_tp_f_ls : unop_tp_f_lu;
			}
		} else if (get_mode_arithmetic(operand_mode) == irma_twos_complement) {
			if (mode_is_signed(operand_mode)) {
				if (get_mode_size_bits(operand_mode) <= 32) {
					if (mode == mode_D)
						return unop_tp_is_d;
					else if (mode == mode_F)
						return unop_tp_is_f;
				} else if (get_mode_size_bits(operand_mode) == 64) {
					if (mode == mode_D)
						return unop_tp_ls_d;
					else if (mode == mode_F)
						return unop_tp_ls_f;
				}
			} else {
				if (get_mode_size_bits(operand_mode) <= 32) {
					if (mode == mode_D)
						return unop_tp_iu_d;
					else if (mode == mode_F)
						return unop_tp_iu_f;
				} else if (get_mode_size_bits(operand_mode) == 64) {
					if (mode == mode_D)
						return unop_tp_lu_d;
					else if (mode == mode_F)
						return unop_tp_lu_f;
				}
			}
		}
		break;
	}

	case iro_Minus:
		if (operand_mode == mode_F)
			return unop_tp_f;
		else if (operand_mode == mode_D)
			return unop_tp_d;
		break;
	default: break;
	}

	panic("could not determine a suitable type");
}
Example #21
/**
 * Post-walker to detect DAG roots that are referenced from other blocks.
 */
static void find_dag_roots(ir_node *node, void *env)
{
	dag_env_t   *dag_env = (dag_env_t*)env;
	int         i, arity;
	dag_entry_t *entry;
	ir_node     *block;

	if (is_Block(node))
		return;

	block = get_nodes_block(node);

	/* ignore start and end blocks */
	ir_graph *const irg = get_Block_irg(block);
	if (block == get_irg_start_block(irg) || block == get_irg_end_block(irg))
		return;

	/* Phi nodes always reference nodes from "other" blocks */
	if (is_Phi(node)) {
		if (get_irn_mode(node) != mode_M) {
			for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
				ir_node *prev = get_irn_n(node, i);

				if (is_Phi(prev))
					continue;

				if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
					if (is_irn_constlike(prev))
						continue;
				}

				entry = get_irn_dag_entry(prev);

				if (! entry) {
					/* found an unassigned node, a new root */
					entry = new_dag_entry(dag_env, prev);
					entry->is_ext_ref = 1;
				}
			}
		}
	} else {
		for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
			ir_node *prev = get_irn_n(node, i);
			ir_mode *mode = get_irn_mode(prev);

			if (mode == mode_X || mode == mode_M)
				continue;

			if (is_Phi(prev))
				continue;

			if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
				if (is_irn_constlike(prev))
					continue;
			}

			if (get_nodes_block(prev) != block) {
				/* The predecessor is from another block. It forms
				   a root. */
				entry = get_irn_dag_entry(prev);
				if (! entry) {
					/* found an unassigned node, a new root */
					entry = new_dag_entry(dag_env, prev);
					entry->is_ext_ref = 1;
				}
			}
		}
	}
}
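How the two statistics walkers might be driven (a sketch; the real pass may wire them differently): connect_dags is documented as a pre-walker and find_dag_roots as a post-walker, which maps onto the pre/post slots of libFirm's irg_walk_graph.

	/* hedged sketch: dag_env setup and result reporting are omitted */
	static void count_dags(ir_graph *irg, dag_env_t *dag_env)
	{
		irg_walk_graph(irg, connect_dags, NULL, dag_env);   /* pre-walk  */
		irg_walk_graph(irg, NULL, find_dag_roots, dag_env); /* post-walk */
	}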