Example no. 1
int attrs_equal_be_node(const ir_node *node1, const ir_node *node2)
{
	const backend_info_t *info1 = be_get_info(node1);
	const backend_info_t *info2 = be_get_info(node2);
	size_t                len   = ARR_LEN(info1->out_infos);
	if (ARR_LEN(info2->out_infos) != len)
		return false;

	int arity = get_irn_arity(node1);
	assert(arity == get_irn_arity(node2));
	for (int in = 0; in < arity; ++in) {
		if (info1->in_reqs[in] != info2->in_reqs[in])
			return false;
	}

	for (size_t i = 0; i < len; ++i) {
		const reg_out_info_t *out1 = &info1->out_infos[i];
		const reg_out_info_t *out2 = &info2->out_infos[i];
		if (out1->reg != out2->reg)
			return false;
		if (!reg_reqs_equal(out1->req, out2->req))
			return false;
	}

	return true;
}
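Equality predicates like this one are typically consumed by hashing/CSE-style machinery that first compares the structural parts of two nodes and only then their attributes. A minimal sketch of such a caller, assuming a made-up helper name (nodes_can_be_merged is not part of the library):

/* Hypothetical caller (sketch): two backend nodes may only be merged when
 * opcode, mode, operands and the backend attributes compared above agree. */
static bool nodes_can_be_merged(const ir_node *a, const ir_node *b)
{
	if (get_irn_op(a) != get_irn_op(b) || get_irn_mode(a) != get_irn_mode(b))
		return false;
	int const arity = get_irn_arity(a);
	if (arity != get_irn_arity(b))
		return false;
	for (int i = 0; i < arity; ++i) {
		if (get_irn_n(a, i) != get_irn_n(b, i))
			return false;
	}
	return attrs_equal_be_node(a, b);
}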
Example no. 2
static void sparc_sp_sim(ir_node *const node, stack_pointer_state_t *state)
{
	sparc_determine_frameoffset(node, state->offset);

	if (is_sparc_Save(node)) {
		sparc_attr_t *const attr = get_sparc_attr(node);
		if (get_irn_arity(node) == 3)
			panic("no support for _reg variant yet");

		/* Adjust for alignment */
		assert(state->misalign == 0);
		int const prev_offset = state->offset;
		int const new_offset  = prev_offset - state->align_padding
		                        - attr->immediate_value;
		int const aligned     = round_up2(new_offset, 1u << state->p2align);
		attr->immediate_value = -(aligned - prev_offset);
		state->align_padding  = aligned - new_offset;
		state->offset         = aligned;
	} else if (is_sparc_SubSP(node) || is_sparc_AddSP(node)) {
		state->align_padding = 0;
	} else if (is_sparc_RestoreZero(node)) {
		state->offset        = 0;
		state->align_padding = 0;
	}
}
Example no. 3
static void be_node_set_register_req_in(ir_node *const node, int const pos,
                                        arch_register_req_t const *const req)
{
	backend_info_t *info = be_get_info(node);
	assert(pos < get_irn_arity(node));
	info->in_reqs[pos] = req;
}
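The setter above constrains a single input. A caller that wants the same requirement on every input has to loop over the arity, much like be_transform_phi does in the next example; a hedged convenience sketch (the wrapper name is invented):

/* Hypothetical wrapper (sketch): pin all inputs of a node to one register
 * requirement, mirroring the per-input loop in be_transform_phi below. */
static void set_all_register_reqs_in(ir_node *const node,
                                     arch_register_req_t const *const req)
{
	for (int i = 0, arity = get_irn_arity(node); i < arity; ++i) {
		be_node_set_register_req_in(node, i, req);
	}
}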
Example no. 4
ir_node *be_transform_phi(ir_node *node, const arch_register_req_t *req)
{
	ir_node  *block = be_transform_nodes_block(node);
	ir_graph *irg   = get_irn_irg(block);
	dbg_info *dbgi  = get_irn_dbg_info(node);

	/* phi nodes allow loops, so we use the old arguments for now
	 * and fix this later */
	ir_node **ins   = get_irn_in(node)+1;
	int       arity = get_irn_arity(node);
	ir_mode  *mode  = req->cls != NULL ? req->cls->mode : get_irn_mode(node);
	ir_node  *phi   = new_ir_node(dbgi, irg, block, op_Phi, mode, arity, ins);
	copy_node_attr(irg, node, phi);

	backend_info_t *info = be_get_info(phi);
	info->in_reqs = be_allocate_in_reqs(irg, arity);
	for (int i = 0; i < arity; ++i) {
		info->in_reqs[i] = req;
	}

	arch_set_irn_register_req_out(phi, 0, req);
	be_enqueue_preds(node);

	return phi;
}
Example no. 5
/**
 * Check whether a Return can be moved one block upwards.
 *
 * In a block with a Return, all live nodes must be linked
 * with the Return, otherwise they are dead (because the Return leaves
 * the graph, so no more users of the other nodes can exist).
 *
 * We can move a Return if its predecessors are Phi nodes or
 * come from another block. In the latter case, it is always possible
 * to move the Return one block up, because the predecessor block must
 * dominate the Return block (SSA) and then it dominates the predecessor
 * block of the Return block as well.
 *
 * All predecessors of the Return block must be Jmps of course, or we
 * cannot move it up, so we add blocks if needed.
 */
static bool can_move_ret(ir_node *ret)
{
    ir_node *retbl = get_nodes_block(ret);
    int i, n = get_irn_arity(ret);

    for (i = 0; i < n; ++i) {
        ir_node *pred = get_irn_n(ret, i);

        if (! is_Phi(pred) && retbl == get_nodes_block(pred)) {
            /* first condition failed: found a non-Phi predecessor
             * that is in the Return block */
            return false;
        }
    }

    /* check, that predecessors are Jmps */
    n = get_Block_n_cfgpreds(retbl);
    /* we cannot move above a labeled block, as this might kill the block */
    if (n <= 1 || get_Block_entity(retbl) != NULL)
        return false;
    for (i = 0; i < n; ++i) {
        ir_node *pred = get_Block_cfgpred(retbl, i);

        pred = skip_Tuple(pred);
        if (! is_Jmp(pred) && !is_Bad(pred)) {
            /* simply place a new block here */
            ir_graph *irg  = get_irn_irg(retbl);
            ir_node *block = new_r_Block(irg, 1, &pred);
            ir_node *jmp   = new_r_Jmp(block);
            set_Block_cfgpred(retbl, i, jmp);
        }
    }
    return true;
}
Example no. 6
ir_node *be_duplicate_node(ir_node *const node)
{
	int       const arity = get_irn_arity(node);
	ir_node **const ins   = ALLOCAN(ir_node*, arity);
	foreach_irn_in(node, i, in) {
		ins[i] = be_transform_node(in);
	}

	ir_node *const block    = be_transform_nodes_block(node);
	ir_node *const new_node = new_similar_node(node, block, ins);
	new_node->node_nr = node->node_nr;
	be_duplicate_deps(node, new_node);
	return new_node;
}
Example no. 7
ir_node *be_duplicate_node(ir_node *node)
{
	ir_node  *block = be_transform_node(get_nodes_block(node));
	ir_graph *irg   = env.irg;
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_mode  *mode  = get_irn_mode(node);
	ir_op    *op    = get_irn_op(node);

	ir_node *new_node;
	int      arity = get_irn_arity(node);
	if (op->opar == oparity_dynamic) {
		new_node = new_ir_node(dbgi, irg, block, op, mode, -1, NULL);
		for (int i = 0; i < arity; ++i) {
			ir_node *in = get_irn_n(node, i);
			in = be_transform_node(in);
			add_irn_n(new_node, in);
		}
	} else {
		ir_node **ins = ALLOCAN(ir_node*, arity);
		for (int i = 0; i < arity; ++i) {
			ir_node *in = get_irn_n(node, i);
			ins[i] = be_transform_node(in);
		}

		new_node = new_ir_node(dbgi, irg, block, op, mode, arity, ins);
	}

	copy_node_attr(irg, node, new_node);
	be_duplicate_deps(node, new_node);

	new_node->node_nr = node->node_nr;
	return new_node;
}
Example no. 8
/**
 * Returns non-zero if node has no backarray, or
 *                  if size of backarray == size of in array.
 */
static int legal_backarray(const ir_node *n)
{
	bitset_t *ba = mere_get_backarray(n);
	if (ba && (bitset_size(ba) != (unsigned) get_irn_arity(n)))
		return 0;
	return 1;
}
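Invariant checkers like this are usually run over the whole graph with the standard walker; a sketch under that assumption (verify_backarrays is a made-up wrapper):

/* Hypothetical verifier (sketch): assert the backarray/arity invariant on
 * every node reachable in the graph. */
static void assert_legal_backarray(ir_node *n, void *env)
{
	(void)env;
	assert(legal_backarray(n));
}

static void verify_backarrays(ir_graph *irg)
{
	irg_walk_graph(irg, assert_legal_backarray, NULL, NULL);
}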
Example no. 9
	static void walk_topo_helper(ir_node* irn, const std::function<void (ir_node*, void*)>& walker, void* env)
	{
		if (irn_visited(irn))
			return;

		/* only break loops at phi/block nodes */
		const bool is_loop_breaker = is_Phi(irn) || is_Block(irn);

		if (is_loop_breaker)
			mark_irn_visited(irn);

		if (!is_Block(irn))
		{
			ir_node* block = get_nodes_block(irn);

			if (block != NULL)
				walk_topo_helper(block, walker, env);
		}

		for (int i = 0; i < get_irn_arity(irn); ++i)
		{
			ir_node* pred = get_irn_n(irn, i);
			walk_topo_helper(pred, walker, env);
		}

		if (is_loop_breaker || !irn_visited(irn))
			walker(irn, env);

		mark_irn_visited(irn);
	}
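A plausible top-level driver, sketched under libFirm's usual visited-flag protocol (walk_topo itself is hypothetical): invalidate old visited marks, then start the recursion at the End node so every reachable node is handled exactly once.

	/* Hypothetical driver (sketch): topologically walk all nodes reachable from
	 * the End node; inc_irg_visited invalidates marks left by earlier walks. */
	static void walk_topo(ir_graph* irg, void (*walker)(ir_node*, void*), void* env)
	{
		inc_irg_visited(irg);
		walk_topo_helper(get_irg_end(irg), walker, env);
	}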
Example no. 10
/**
 * Copies a node to a new irg. The Ins of the new node point to
 * the predecessors in the old irg.  n->link points to the new node.
 *
 * @param n    The node to be copied
 * @param irg  the new irg
 *
 * Does NOT copy standard nodes like Start, End etc. that are fixed
 * in an irg. Instead, the corresponding nodes of the new irg are used.
 * Note further that the new nodes have no block.
 */
static void copy_irn_to_irg(ir_node *n, ir_graph *irg)
{
    /* do not copy standard nodes */
    ir_node *nn = NULL;
    switch (get_irn_opcode(n)) {
    case iro_NoMem:
        nn = get_irg_no_mem(irg);
        break;

    case iro_Block: {
        ir_graph *old_irg = get_irn_irg(n);
        if (n == get_irg_start_block(old_irg))
            nn = get_irg_start_block(irg);
        else if (n == get_irg_end_block(old_irg))
            nn = get_irg_end_block(irg);
        break;
    }

    case iro_Start:
        nn = get_irg_start(irg);
        break;

    case iro_End:
        nn = get_irg_end(irg);
        break;

    case iro_Proj: {
        ir_graph *old_irg = get_irn_irg(n);
        if (n == get_irg_frame(old_irg))
            nn = get_irg_frame(irg);
        else if (n == get_irg_initial_mem(old_irg))
            nn = get_irg_initial_mem(irg);
        else if (n == get_irg_args(old_irg))
            nn = get_irg_args(irg);
        break;
    }
    }

    if (nn) {
        set_irn_link(n, nn);
        return;
    }

    nn = new_ir_node(get_irn_dbg_info(n),
                     irg,
                     NULL,            /* no block yet, will be set later */
                     get_irn_op(n),
                     get_irn_mode(n),
                     get_irn_arity(n),
                     get_irn_in(n));

    /* Copy the attributes.  These might point to additional data.  If this
       was allocated on the old obstack the pointers now are dangling.  This
       frees e.g. the memory of the graph_arr allocated in new_immBlock. */
    copy_node_attr(irg, n, nn);
    set_irn_link(n, nn);
}
Example no. 11
void be_set_phi_reg_req(ir_node *node, const arch_register_req_t *req)
{
	assert(mode_is_data(get_irn_mode(node)));
	backend_info_t *info = be_get_info(node);
	info->out_infos[0].req = req;
	for (int i = 0, arity = get_irn_arity(node); i < arity; ++i) {
		info->in_reqs[i] = req;
	}
}
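A sketch of typical use during backend phi fixup, hedged (the surrounding loop is illustrative only; it reuses the Block-phi list accessors seen in the next example):

/* Hypothetical fixup (sketch): give every data Phi of a block the same
 * register requirement. */
static void fix_block_phi_reqs(ir_node *block, const arch_register_req_t *req)
{
	for (ir_node *phi = get_Block_phis(block); phi != NULL;
	     phi = get_Phi_next(phi)) {
		if (mode_is_data(get_irn_mode(phi)))
			be_set_phi_reg_req(phi, req);
	}
}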
Example no. 12
/**
 * Block-walker, remove Bad block predecessors and shorten Phis.
 * Phi links must be up-to-date.
 */
static void block_remove_bads(ir_node *block)
{
	/* 1. Create a new block without Bad inputs */
	ir_graph  *irg     = get_irn_irg(block);
	const int  max     = get_Block_n_cfgpreds(block);
	ir_node  **new_in  = ALLOCAN(ir_node*, max);
	unsigned   new_max = 0;
	for (int i = 0; i < max; ++i) {
		ir_node *const block_pred = get_Block_cfgpred(block, i);
		if (!is_Bad(block_pred)) {
			new_in[new_max++] = block_pred;
		}
	}

	/* If the end block is unreachable, it might have zero predecessors. */
	if (new_max == 0) {
		ir_node *end_block = get_irg_end_block(irg);
		if (block == end_block) {
			set_irn_in(block, new_max, new_in);
			return;
		}
	}

	dbg_info  *dbgi         = get_irn_dbg_info(block);
	ir_node   *new_block    = new_rd_Block(dbgi, irg, new_max, new_in);
	ir_entity *block_entity = get_Block_entity(block);
	set_Block_entity(new_block, block_entity);

	/* 2. Remove inputs on Phis, where the block input is Bad. */
	for (ir_node *phi = get_Block_phis(block), *next; phi != NULL; phi = next) {
		next = get_Phi_next(phi);

		assert(get_irn_arity(phi) == max);

		unsigned j = 0;
		foreach_irn_in(phi, i, pred) {
			ir_node *const block_pred = get_Block_cfgpred(block, i);
			if (!is_Bad(block_pred)) {
				new_in[j++] = pred;
			}
		}
		assert(j == new_max);

		/* shortcut if only 1 phi input is left */
		if (new_max == 1) {
			ir_node *new_node = new_in[0];
			/* can happen inside unreachable endless loops */
			if (new_node == phi)
				return;
			if (get_Phi_loop(phi))
				remove_keep_alive(phi);
			exchange(phi, new_node);
		} else {
			set_irn_in(phi, new_max, new_in);
		}
	}

	/* 3. Replace the old block with the new one. */
	exchange(block, new_block);
}
Example no. 13
/**
 * This function returns the last definition of a value.  In case
 * this value was last defined in a previous block, Phi nodes are
 * inserted.  If the part of the firm graph containing the definition
 * is not yet constructed, a dummy Phi node is returned.
 *
 * @param block   the current block
 * @param pos     the value number of the value searched
 * @param mode    the mode of this value (needed for Phi construction)
 */
static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
{
	ir_node *res = block->attr.block.graph_arr[pos];
	if (res != NULL)
		return res;

	/* in a matured block we can immediately determine the phi arguments */
	if (get_Block_matured(block)) {
		ir_graph *const irg   = get_irn_irg(block);
		int       const arity = get_irn_arity(block);
		/* no predecessors: use unknown value */
		if (arity == 0) {
			if (block == get_irg_start_block(irg)) {
				if (default_initialize_local_variable != NULL) {
					ir_node *rem = get_r_cur_block(irg);
					set_r_cur_block(irg, block);
					res = default_initialize_local_variable(irg, mode, pos - 1);
					set_r_cur_block(irg, rem);
				} else {
					res = new_r_Unknown(irg, mode);
				}
			} else {
				goto bad; /* unreachable block, use Bad */
			}
		/* one predecessor: just use its value */
		} else if (arity == 1) {
			ir_node *cfgpred = get_Block_cfgpred(block, 0);
			if (is_Bad(cfgpred)) {
bad:
				res = new_r_Bad(irg, mode);
			} else {
				ir_node *cfgpred_block = get_nodes_block(cfgpred);
				res = get_r_value_internal(cfgpred_block, pos, mode);
			}
		} else {
			/* multiple predecessors construct Phi */
			res = new_rd_Phi0(NULL, block, mode, pos);
			/* enter phi0 into our variable value table to break cycles
			 * arising from set_phi_arguments */
			block->attr.block.graph_arr[pos] = res;
			res = set_phi_arguments(res, pos);
		}
	} else {
		/* in case of immature block we have to keep a Phi0 */
		res = new_rd_Phi0(NULL, block, mode, pos);
		/* enqueue phi so we can set arguments once the block matures */
		res->attr.phi.next     = block->attr.block.phis;
		block->attr.block.phis = res;
	}
	block->attr.block.graph_arr[pos] = res;
	return res;
}
Example no. 14
/**
 * Returns backarray if the node can have backedges, else returns
 * NULL.
 */
static bitset_t *get_backarray(const ir_node *n)
{
	bitset_t *ba = mere_get_backarray(n);
#ifndef NDEBUG
	if (ba != NULL) {
		size_t bal = bitset_size(ba);  /* avoid macro expansion in assertion. */
		size_t inl = get_irn_arity(n);
		assert(bal == inl);
	}
#endif

	return ba;
}
Example no. 15
static int sparc_get_sp_bias(const ir_node *node)
{
	if (be_is_IncSP(node))
		return be_get_IncSP_offset(node);
	if (is_sparc_Save(node)) {
		const sparc_attr_t *attr = get_sparc_attr_const(node);
		if (get_irn_arity(node) == 3)
			panic("no support for _reg variant yet");

		return -attr->immediate_value;
	}
	if (is_sparc_RestoreZero(node))
		return SP_BIAS_RESET;
	return 0;
}
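One plausible consumer, sketched: a pass walking a schedule and tracking the current stack-pointer displacement would fold the per-node bias like this (accumulate_sp_bias is a made-up name; SP_BIAS_RESET is treated as "back to the state at function entry", as the RestoreZero case suggests):

/* Hypothetical fold step (sketch): update the running SP displacement with
 * the bias contributed by one node; SP_BIAS_RESET drops back to zero. */
static int accumulate_sp_bias(int bias, const ir_node *node)
{
	int const delta = sparc_get_sp_bias(node);
	if (delta == SP_BIAS_RESET)
		return 0;
	return bias + delta;
}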
Example no. 16
/**
 * Transform helper for blocks.
 */
static ir_node *transform_block(ir_node *node)
{
	ir_graph *irg   = get_irn_irg(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_mode  *mode  = get_irn_mode(node);
	ir_node  *block = new_ir_node(dbgi, irg, NULL, get_irn_op(node), mode,
	                              get_irn_arity(node), get_irn_in(node) + 1);
	copy_node_attr(irg, node, block);
	block->node_nr = node->node_nr;

	/* transfer execfreq value */
	double execfreq = get_block_execfreq(node);
	set_block_execfreq(block, execfreq);

	/* put the preds in the worklist */
	be_enqueue_preds(node);

	return block;
}
Example no. 17
void fix_backedges(struct obstack *obst, ir_node *n)
{
	bitset_t *arr = mere_get_backarray(n);
	if (arr == NULL)
		return;

	int arity = get_irn_arity(n);
	if (bitset_size(arr) != (unsigned) arity) {
		arr = new_backedge_arr(obst, arity);

		unsigned opc = get_irn_opcode(n);
		if (opc == iro_Phi)
			n->attr.phi.u.backedge = arr;
		else if (opc == iro_Block)
			n->attr.block.backedge = arr;
	}

	assert(legal_backarray(n));
}
Example no. 18
/**
 * Initializes the generic backend attributes of a node.
 */
static void init_node_attr(ir_node *const node, unsigned const n_outputs, arch_irn_flags_t const flags)
{
	ir_graph       *irg  = get_irn_irg(node);
	backend_info_t *info = be_get_info(node);

	unsigned                    const arity   = get_irn_arity(node);
	arch_register_req_t const **const in_reqs =
		is_irn_dynamic(node) ? NEW_ARR_F(arch_register_req_t const*, arity) :
		arity != 0           ? be_allocate_in_reqs(irg, arity) :
		NULL;
	for (unsigned i = 0; i < arity; ++i) {
		in_reqs[i] = arch_no_register_req;
	}
	info->in_reqs = in_reqs;

	struct obstack *const obst = be_get_be_obst(irg);
	info->out_infos = NEW_ARR_DZ(reg_out_info_t, obst, n_outputs);
	for (unsigned i = 0; i < n_outputs; ++i) {
		info->out_infos[i].req = arch_no_register_req;
	}
	info->flags = flags;
}
Example no. 19
/**
 * Computes the predecessors for the real phi node, and then
 * allocates and returns this node.  The routine called to allocate the
 * node might optimize it away and return a real value.
 * This function must be called with an in-array of proper size.
 */
static ir_node *set_phi_arguments(ir_node *phi, int pos)
{
	ir_node  *block        = get_nodes_block(phi);
	ir_graph *irg          = get_irn_irg(block);
	int       arity        = get_irn_arity(block);
	ir_node **in           = ALLOCAN(ir_node*, arity);
	ir_mode  *mode         = get_irn_mode(phi);

	/* This loop goes to all predecessor blocks of the block the Phi node
	 * is in and there finds the operands of the Phi node by calling
	 * get_r_value_internal.  */
	for (int i = 0; i < arity; ++i) {
		ir_node *cfgpred = get_Block_cfgpred_block(block, i);
		ir_node *value;
		if (cfgpred == NULL) {
			value = new_r_Bad(irg, mode);
		} else {
			value = get_r_value_internal(cfgpred, pos, mode);
		}
		in[i] = value;
	}

	phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), arity);
	set_irn_in(phi, arity, in);

	verify_new_node(phi);

	try_remove_unnecessary_phi(phi);

	/* To solve the problem of (potentially) endless loops being observable
	 * behaviour we add a keep-alive edge to all PhiM nodes. */
	if (mode == mode_M && !is_Id(phi)) {
		phi->attr.phi.loop = true;
		keep_alive(phi);
	}

	return phi;
}
Example no. 20
static ir_node *transform_end(ir_node *node)
{
	/* end has to be duplicated manually because we need a dynamic in array */
	ir_graph *irg     = get_irn_irg(node);
	dbg_info *dbgi    = get_irn_dbg_info(node);
	ir_node  *block   = be_transform_node(get_nodes_block(node));
	ir_node  *new_end = new_ir_node(dbgi, irg, block, op_End, mode_X, -1, NULL);
	copy_node_attr(irg, node, new_end);
	be_duplicate_deps(node, new_end);

	set_irg_end(irg, new_end);

	/* do not transform predecessors yet to keep the pre-transform
	 * phase from visiting the whole graph */
	int arity = get_irn_arity(node);
	for (int i = 0; i < arity; ++i) {
		ir_node *in = get_irn_n(node, i);
		add_End_keepalive(new_end, in);
	}
	be_enqueue_preds(node);

	return new_end;
}
Example no. 21
ir_entity *detect_call(ir_node* call) {
	assert(is_Call(call));

	ir_node *callee = get_irn_n(call, 1);
	if (is_Address(callee)) {
		ir_entity *entity = get_Address_entity(callee);
		if (entity == gcj_init_entity) {
			assert(get_irn_arity(call) == 3);
			ir_node *arg = get_irn_n(call, 2);
			assert(is_Address(arg));
			ir_entity *rtti = get_Address_entity(arg);
			ir_type *klass = cpmap_find(&rtti2class, rtti);
			assert(klass);
			ir_entity *init_method = cpmap_find(&class2init, klass);
			//assert(init_method); // _Jv_InitClass calls may exist even though the class has no clinit
			return init_method;
		} // else if (entity == ...)
	} else {
		assert(false);
	}

	return NULL;
}
Example no. 22
/**
 * Pre-walker for connecting DAGs and counting.
 */
static void connect_dags(ir_node *node, void *env)
{
	dag_env_t   *dag_env = (dag_env_t*)env;
	int         i, arity;
	ir_node     *block;
	dag_entry_t *entry;
	ir_mode     *mode;

	if (is_Block(node))
		return;

	block = get_nodes_block(node);

	/* ignore start and end blocks */
	ir_graph *const irg = get_Block_irg(block);
	if (block == get_irg_start_block(irg) || block == get_irg_end_block(irg))
		return;

	/* ignore Phi nodes */
	if (is_Phi(node))
		return;

	if (dag_env->options & FIRMSTAT_ARGS_ARE_ROOTS && is_arg(node))
		return;

	mode = get_irn_mode(node);
	if (mode == mode_X || mode == mode_M) {
		/* do NOT count mode_X and mode_M nodes */
		return;
	}

	/* if this option is set, Loads are always leaves */
	if (dag_env->options & FIRMSTAT_LOAD_IS_LEAVE && is_Load(node))
		return;

	if (dag_env->options & FIRMSTAT_CALL_IS_LEAVE && is_Call(node))
		return;

	entry = get_irn_dag_entry(node);

	if (! entry) {
		/* found an unassigned node, maybe a new root */
		entry = new_dag_entry(dag_env, node);
	}

	/* put the predecessors into the same DAG as the current */
	for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
		ir_node *prev = get_irn_n(node, i);
		ir_mode *mode = get_irn_mode(prev);

		if (is_Phi(prev))
			continue;

		if (mode == mode_X || mode == mode_M)
			continue;

		/*
		 * copy constants into the DAGs if requested;
		 * beware: do NOT add a link, as this would result in
		 * wrong intersections
		 */
		if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
			if (is_irn_constlike(prev)) {
				++entry->num_nodes;
				++entry->num_inner_nodes;
			}
		}

		/* only nodes from the same block go into the DAG */
		if (get_nodes_block(prev) == block) {
			dag_entry_t *prev_entry = get_irn_dag_entry(prev);

			if (! prev_entry) {
				/* not assigned node, put it into the same DAG */
				set_irn_dag_entry(prev, entry);
				++entry->num_nodes;
				++entry->num_inner_nodes;
			} else {
				if (prev_entry == entry) {
					/* We found a node that is already assigned to this DAG.
					   This DAG is not a tree. */
					entry->is_tree = 0;
				} else {
					/* two DAGs intersect: copy the data to one of them
					   and kill the other */
					entry->num_roots       += prev_entry->num_roots;
					entry->num_nodes       += prev_entry->num_nodes;
					entry->num_inner_nodes += prev_entry->num_inner_nodes;
					entry->is_tree         &= prev_entry->is_tree;

					--dag_env->num_of_dags;

					prev_entry->is_dead = 1;
					prev_entry->link    = entry;
				}
			}
		}
	}
}
Example no. 23
/**
 * Computes the weight of a method parameter.
 *
 * @param arg  The parameter whose weight must be computed.
 */
static unsigned calc_method_param_weight(ir_node *arg)
{
	/* We mark the nodes to avoid endless recursion */
	mark_irn_visited(arg);

	unsigned weight = null_weight;
	for (int i = get_irn_n_outs(arg); i-- > 0; ) {
		ir_node *succ = get_irn_out(arg, i);
		if (irn_visited(succ))
			continue;

		/* We should not walk over the memory edge.*/
		if (get_irn_mode(succ) == mode_M)
			continue;

		switch (get_irn_opcode(succ)) {
		case iro_Call:
			if (get_Call_ptr(succ) == arg) {
				/* the argument is used as the pointer input of a call;
				   we can probably change an indirect Call into a direct one. */
				weight += indirect_call_weight;
			}
			break;
		case iro_Cmp: {
			/* We have reached a Cmp and must increase the
			   weight by cmp_weight. */
			ir_node *op;
			if (get_Cmp_left(succ) == arg)
				op = get_Cmp_right(succ);
			else
				op = get_Cmp_left(succ);

			if (is_irn_constlike(op)) {
				weight += const_cmp_weight;
			} else
				weight += cmp_weight;
			break;
		}
		case iro_Cond:
			/* the argument is used for a SwitchCond, a big win */
			weight += const_cmp_weight * get_irn_n_outs(succ);
			break;
		case iro_Id:
			/* when looking backward we might find Id nodes */
			weight += calc_method_param_weight(succ);
			break;
		case iro_Tuple:
			/* unoptimized tuple */
			for (int j = get_Tuple_n_preds(succ); j-- > 0; ) {
				ir_node *pred = get_Tuple_pred(succ, j);
				if (pred == arg) {
					/* look for Proj(j) */
					for (int k = get_irn_n_outs(succ); k-- > 0; ) {
						ir_node *succ_succ = get_irn_out(succ, k);
						if (is_Proj(succ_succ)) {
							if (get_Proj_proj(succ_succ) == j) {
								/* found */
								weight += calc_method_param_weight(succ_succ);
							}
						} else {
							/* this should NOT happen */
						}
					}
				}
			}
			break;
		default:
			if (is_binop(succ)) {
				/* We have reached a BinOp and must increase the
				   weight by binop_weight. If the other operand of the
				   BinOp is a constant, we increase the weight by
				   const_binop_weight and recurse.
				 */
				ir_node *op;
				if (get_binop_left(succ) == arg)
					op = get_binop_right(succ);
				else
					op = get_binop_left(succ);

				if (is_irn_constlike(op)) {
					weight += const_binop_weight;
					weight += calc_method_param_weight(succ);
				} else
					weight += binop_weight;
			} else if (get_irn_arity(succ) == 1) {
				/* We have reached a unary op and must increase the
				   weight by const_binop_weight and recurse. */
				weight += const_binop_weight;
				weight += calc_method_param_weight(succ);
			}
			break;
		}
	}
	set_irn_link(arg, NULL);
	return weight;
}
Example no. 24
/**
 * Post-walker to detect DAG roots that are referenced from other blocks.
 */
static void find_dag_roots(ir_node *node, void *env)
{
	dag_env_t   *dag_env = (dag_env_t*)env;
	int         i, arity;
	dag_entry_t *entry;
	ir_node     *block;

	if (is_Block(node))
		return;

	block = get_nodes_block(node);

	/* ignore start and end blocks */
	ir_graph *const irg = get_Block_irg(block);
	if (block == get_irg_start_block(irg) || block == get_irg_end_block(irg))
		return;

	/* Phi nodes always reference nodes from "other" blocks */
	if (is_Phi(node)) {
		if (get_irn_mode(node) != mode_M) {
			for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
				ir_node *prev = get_irn_n(node, i);

				if (is_Phi(prev))
					continue;

				if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
					if (is_irn_constlike(prev))
						continue;
				}

				entry = get_irn_dag_entry(prev);

				if (! entry) {
					/* found an unassigned node, a new root */
					entry = new_dag_entry(dag_env, node);
					entry->is_ext_ref = 1;
				}
			}
		}
	} else {
		for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
			ir_node *prev = get_irn_n(node, i);
			ir_mode *mode = get_irn_mode(prev);

			if (mode == mode_X || mode == mode_M)
				continue;

			if (is_Phi(prev))
				continue;

			if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
				if (is_irn_constlike(prev))
					continue;
			}

			if (get_nodes_block(prev) != block) {
				/* The predecessor is from another block: it forms
				   a root. */
				entry = get_irn_dag_entry(prev);
				if (! entry) {
					/* found an unassigned node, a new root */
					entry = new_dag_entry(dag_env, node);
					entry->is_ext_ref = 1;
				}
			}
		}
	}
}
Example no. 25
unsigned be_get_MemPerm_entity_arity(const ir_node *irn)
{
	assert(be_is_MemPerm(irn));
	return get_irn_arity(irn);
}
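For context, a sketch of how this entity arity is typically consumed; it assumes the be_get_MemPerm_in_entity/be_get_MemPerm_out_entity accessors that accompany this function (their exact signatures are an assumption here):

#include <stdio.h>

/* Hypothetical dump helper (sketch): list the entity pairs a MemPerm moves,
 * assuming the be_get_MemPerm_{in,out}_entity accessors. */
static void dump_memperm(const ir_node *memperm)
{
	unsigned const n = be_get_MemPerm_entity_arity(memperm);
	for (unsigned i = 0; i < n; ++i) {
		ir_entity *const in  = be_get_MemPerm_in_entity(memperm, i);
		ir_entity *const out = be_get_MemPerm_out_entity(memperm, i);
		printf("%s -> %s\n", get_entity_name(in), get_entity_name(out));
	}
}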