Example #1
/**
 * Copies a node to a new irg. The Ins of the new node point to
 * the predecessors in the old irg.  n->link points to the new node.
 *
 * @param n    The node to be copied
 * @param irg  the new irg
 *
 * Does NOT copy standard nodes like Start, End etc. that are fixed
 * in an irg. Instead, the corresponding nodes of the new irg are used.
 * Note further that the new nodes have no block.
 */
static void copy_irn_to_irg(ir_node *n, ir_graph *irg)
{
    /* do not copy standard nodes */
    ir_node *nn = NULL;
    switch (get_irn_opcode(n)) {
    case iro_NoMem:
        nn = get_irg_no_mem(irg);
        break;

    case iro_Block: {
        ir_graph *old_irg = get_irn_irg(n);
        if (n == get_irg_start_block(old_irg))
            nn = get_irg_start_block(irg);
        else if (n == get_irg_end_block(old_irg))
            nn = get_irg_end_block(irg);
        break;
    }

    case iro_Start:
        nn = get_irg_start(irg);
        break;

    case iro_End:
        nn = get_irg_end(irg);
        break;

    case iro_Proj: {
        ir_graph *old_irg = get_irn_irg(n);
        if (n == get_irg_frame(old_irg))
            nn = get_irg_frame(irg);
        else if (n == get_irg_initial_mem(old_irg))
            nn = get_irg_initial_mem(irg);
        else if (n == get_irg_args(old_irg))
            nn = get_irg_args(irg);
        break;
    }
    }

    if (nn) {
        set_irn_link(n, nn);
        return;
    }

    nn = new_ir_node(get_irn_dbg_info(n),
                     irg,
                     NULL,            /* no block yet, will be set later */
                     get_irn_op(n),
                     get_irn_mode(n),
                     get_irn_arity(n),
                     get_irn_in(n));


    /* Copy the attributes.  These might point to additional data.  If this
       was allocated on the old obstack the pointers are now dangling.  This
       frees e.g. the memory of the graph_arr allocated in new_immBlock. */
    copy_node_attr(irg, n, nn);
    set_irn_link(n, nn);
}
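The copies produced above are stored in each old node's link field, so a caller typically reserves the link resource and drives the function with a graph walker. A minimal sketch under that assumption; copy_walker and copy_all_to are hypothetical helpers, irg_walk_graph is the generic pre/post walker from irgwalk.h:

/* Hypothetical driver: pass the target graph through the walker's env
 * pointer and copy every node of old_irg into new_irg. */
static void copy_walker(ir_node *n, void *env)
{
	copy_irn_to_irg(n, (ir_graph*)env);
}

static void copy_all_to(ir_graph *old_irg, ir_graph *new_irg)
{
	/* the copies live in the link fields, so reserve them first */
	ir_reserve_resources(old_irg, IR_RESOURCE_IRN_LINK);
	irg_walk_graph(old_irg, copy_walker, NULL, new_irg);
	ir_free_resources(old_irg, IR_RESOURCE_IRN_LINK);
}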
Example #2
void compute_cdep(ir_graph *irg)
{
	free_cdep(irg);
	cdep_data = XMALLOC(cdep_info);
	obstack_init(&cdep_data->obst);

	cdep_data->cdep_map = pmap_create();

	assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_POSTDOMINANCE);

	/* We must temporarily change the post-dominator relation:
	   the ipdom of the start block becomes the end block, because
	   Firm does NOT add the phantom edge from Start to End.
	 */
	ir_node *const start_block = get_irg_start_block(irg);
	ir_node *const end_block   = get_irg_end_block(irg);
	ir_node *const rem         = get_Block_ipostdom(start_block);
	set_Block_ipostdom(start_block, end_block);

	irg_block_walk_graph(irg, cdep_pre, NULL, NULL);

	(void) cdep_edge_hook;

	/* restore the post dominator relation */
	set_Block_ipostdom(start_block, rem);
}
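The analysis pairs with free_cdep, which compute_cdep already calls defensively at the top. A minimal lifecycle sketch, assuming the control-dependence information is queried between the two calls through the cdep query interface (not shown here); with_cdep is a hypothetical helper:

/* Sketch of the typical lifecycle: compute, query, free. */
static void with_cdep(ir_graph *irg)
{
	compute_cdep(irg);
	/* ... query control dependences of the blocks of irg here ... */
	free_cdep(irg);
}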
Example #3
ir_node *be_new_Start(ir_graph *const irg, be_start_out const *const outs)
{
	ir_node *const block  = get_irg_start_block(irg);
	ir_node *const start  = new_ir_node(NULL, irg, block, op_be_Start, mode_T, 0, NULL);
	unsigned const n_regs = isa_if->n_registers;

	/* Count the number of outputs. */
	unsigned k = 1; /* +1 for memory */
	for (unsigned i = 0; i != n_regs; ++i) {
		if (outs[i] != BE_START_NO)
			++k;
	}

	be_info_init_irn(start, arch_irn_flag_schedule_first, NULL, k);

	/* Set out requirements and registers. */
	unsigned l = 0;
	arch_set_irn_register_req_out(start, l++, arch_memory_req);
	arch_register_t const *const regs = isa_if->registers;
	for (unsigned i = 0; i != n_regs; ++i) {
		if (outs[i] != BE_START_NO) {
			arch_register_t     const *const reg    = &regs[i];
			bool                       const ignore = outs[i] == BE_START_IGNORE;
			arch_register_req_t const *const req    = be_create_reg_req(irg, reg, ignore);
			arch_set_irn_register_req_out(start, l, req);
			arch_set_irn_register_out(    start, l, reg);
			++l;
		}
	}
	assert(l == k);

	return start;
}
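A caller has to provide one be_start_out entry per register. A hedged sketch of how such an array might be filled; BE_START_REG is assumed to be the enum value for a normal register output next to BE_START_NO and BE_START_IGNORE, and sp_index/fp_index are made-up register indices:

/* Hypothetical caller: mark one ignore register and one normal output,
 * leave every other register out of the Start node. */
static ir_node *make_start(ir_graph *irg, unsigned sp_index, unsigned fp_index)
{
	unsigned      const n_regs = isa_if->n_registers;
	be_start_out *const outs   = XMALLOCN(be_start_out, n_regs);
	for (unsigned i = 0; i != n_regs; ++i)
		outs[i] = BE_START_NO;
	outs[sp_index] = BE_START_IGNORE; /* e.g. the stack pointer */
	outs[fp_index] = BE_START_REG;    /* assumed value for a plain output */
	ir_node *const start = be_new_Start(irg, outs);
	free(outs);
	return start;
}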
Example #4
ir_graph *create_irg_copy(ir_graph *irg)
{
	ir_graph *res = alloc_graph();

	res->irg_pinned_state = irg->irg_pinned_state;

	/* clone the frame type here for safety */
	irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
	res->frame_type  = clone_frame_type(irg->frame_type);

	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);

	/* copy all nodes from the graph irg to the new graph res */
	irg_walk_anchors(irg, copy_all_nodes, rewire, res);

	/* copy the Anchor node */
	res->anchor = get_new_node(irg->anchor);

	/* -- The end block -- */
	set_irg_end_block (res, get_new_node(get_irg_end_block(irg)));
	set_irg_end       (res, get_new_node(get_irg_end(irg)));

	/* -- The start block -- */
	set_irg_start_block(res, get_new_node(get_irg_start_block(irg)));
	set_irg_no_mem     (res, get_new_node(get_irg_no_mem(irg)));
	set_irg_start      (res, get_new_node(get_irg_start(irg)));

	/* Proj results of start node */
	set_irg_initial_mem(res, get_new_node(get_irg_initial_mem(irg)));

	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
	irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);

	return res;
}
Example #5
/**
 * Block walker: matures all immature blocks (except the start block).
 */
static void finish_block(ir_node *block, void *env)
{
	(void)env;
	ir_graph *irg = get_irn_irg(block);
	if (block != get_irg_start_block(irg))
		mature_immBlock(block);
}
Example #6
/**
 * Post-walker: prepares the blocks for a new SSA construction cycle by
 * allocating new value arrays.
 */
static void prepare_blocks(ir_node *block, void *env)
{
	(void)env;
	ir_graph *const irg   = get_irn_irg(block);
	unsigned  const n_loc = irg->n_loc;
	/* reset mature flag */
	if (block != get_irg_start_block(irg))
		set_Block_matured(block, 0);
	block->attr.block.graph_arr = NEW_ARR_DZ(ir_node*, get_irg_obstack(irg), n_loc);
	set_Block_phis(block, NULL);
}
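Together with finish_block from Example #5, this walker frames an SSA reconstruction cycle. A rough sketch, assuming both are registered as block post-walkers and that new definitions are entered in between with the usual value constructors (e.g. set_r_value()/get_r_value()); ssa_cycle is a hypothetical driver:

/* Prepare all blocks, let the caller add new definitions, then mature
 * the blocks again. */
static void ssa_cycle(ir_graph *irg)
{
	irg_block_walk_graph(irg, NULL, prepare_blocks, NULL);
	/* ... insert new code and new definitions here ... */
	irg_block_walk_graph(irg, NULL, finish_block, NULL);
}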
Example #7
static ir_node *create_gotpcrel_load(ir_graph  *irg, ir_entity *const entity)
{
	ir_node *const addr
		= be_new_Relocation(irg, X86_IMM_GOTPCREL, entity, mode_P);
	ir_type *const type  = get_entity_type(entity);
	ir_node *const nomem = get_irg_no_mem(irg);
	ir_node *const block = get_irg_start_block(irg);
	ir_node *const load  = new_rd_Load(NULL, block, nomem, addr, mode_P,
									   type, cons_floats);
	return new_r_Proj(load, mode_P, pn_Load_res);
}
Example #8
ir_node *be_new_Relocation(dbg_info *const dbgi, ir_graph *const irg, unsigned const kind, ir_entity *const entity, ir_mode *const mode)
{
	ir_node *const block = get_irg_start_block(irg);
	ir_node *const node  = new_ir_node(dbgi, irg, block, op_be_Relocation,
	                                   mode, 0, NULL);
	be_relocation_attr_t *const attr
		= (be_relocation_attr_t*)get_irn_generic_attr(node);
	attr->entity = entity;
	attr->kind   = kind;
	ir_node *const optimized = optimize_node(node);
	return optimized;
}
Example #9
/**
 * This function returns the last definition of a value.  In case
 * this value was last defined in a previous block, Phi nodes are
 * inserted.  If the part of the firm graph containing the definition
 * is not yet constructed, a dummy Phi node is returned.
 *
 * @param block   the current block
 * @param pos     the value number of the value searched
 * @param mode    the mode of this value (needed for Phi construction)
 */
static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
{
	ir_node *res = block->attr.block.graph_arr[pos];
	if (res != NULL)
		return res;

	/* in a matured block we can immediately determine the phi arguments */
	if (get_Block_matured(block)) {
		ir_graph *const irg   = get_irn_irg(block);
		int       const arity = get_irn_arity(block);
		/* no predecessors: use unknown value */
		if (arity == 0) {
			if (block == get_irg_start_block(irg)) {
				if (default_initialize_local_variable != NULL) {
					ir_node *rem = get_r_cur_block(irg);
					set_r_cur_block(irg, block);
					res = default_initialize_local_variable(irg, mode, pos - 1);
					set_r_cur_block(irg, rem);
				} else {
					res = new_r_Unknown(irg, mode);
				}
			} else {
				goto bad; /* unreachable block, use Bad */
			}
		/* one predecessor: just use its value */
		} else if (arity == 1) {
			ir_node *cfgpred = get_Block_cfgpred(block, 0);
			if (is_Bad(cfgpred)) {
bad:
				res = new_r_Bad(irg, mode);
			} else {
				ir_node *cfgpred_block = get_nodes_block(cfgpred);
				res = get_r_value_internal(cfgpred_block, pos, mode);
			}
		} else {
			/* multiple predecessors construct Phi */
			res = new_rd_Phi0(NULL, block, mode, pos);
			/* enter phi0 into our variable value table to break cycles
			 * arising from set_phi_arguments */
			block->attr.block.graph_arr[pos] = res;
			res = set_phi_arguments(res, pos);
		}
	} else {
		/* in case of an immature block we have to keep a Phi0 */
		res = new_rd_Phi0(NULL, block, mode, pos);
		/* enqueue phi so we can set arguments once the block matures */
		res->attr.phi.next     = block->attr.block.phis;
		block->attr.block.phis = res;
	}
	block->attr.block.graph_arr[pos] = res;
	return res;
}
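In libFirm's construction interface this internal function sits behind the public value accessors. An illustrative sketch, assuming the usual get_value()/set_value() pair operating on the current block of the graph under construction; build_example and the value number are made up:

/* Define local variable slot 1 in the current block and read it back
 * later; the read may trigger Phi construction via get_r_value_internal. */
static void build_example(ir_node *some_value)
{
	int const var = 1;                 /* value number of a local variable */
	set_value(var, some_value);        /* definition in the current block */
	/* ... construct further blocks here ... */
	ir_node *res = get_value(var, get_irn_mode(some_value));
	(void)res;                         /* possibly a Phi node */
}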
Example #10
ir_graph *new_const_code_irg(void)
{
	ir_graph *const res = new_r_ir_graph(NULL, 0);
	mature_immBlock(get_irg_end_block(res));

	/* There is no Start node in the const_code_irg */
	set_irg_start(res, new_r_Bad(res, mode_T));
	set_irg_frame(res, new_r_Bad(res, mode_BAD));
	set_irg_args(res, new_r_Bad(res, mode_T));
	set_irg_initial_mem(res, new_r_Bad(res, mode_M));
	set_r_store(res, get_irg_no_mem(res));

	/* Set the visited flag high enough that the blocks will never be
	 * visited. */
	ir_node *const body_block = get_r_cur_block(res);
	set_irn_visited(body_block, -1);
	set_Block_block_visited(body_block, -1);
	ir_node *const start_block = get_irg_start_block(res);
	set_Block_block_visited(start_block, -1);
	set_irn_visited(start_block, -1);

	return res;
}
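The const-code graph is where expressions without a home function live, e.g. initializers. A small sketch, assuming the usual accessors get_const_code_irg(), new_tarval_from_long() and new_r_Const(); make_const_42 is a hypothetical helper:

/* Build the constant 42 in the const-code irg; Const nodes end up in the
 * start block of that graph. */
static ir_node *make_const_42(void)
{
	ir_graph  *const cc_irg = get_const_code_irg();
	ir_tarval *const tv     = new_tarval_from_long(42, mode_Is);
	return new_r_Const(cc_irg, tv);
}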
Example #11
/**
 * Pre-walker for connecting DAGs and counting.
 */
static void connect_dags(ir_node *node, void *env)
{
	dag_env_t   *dag_env = (dag_env_t*)env;
	int         i, arity;
	ir_node     *block;
	dag_entry_t *entry;
	ir_mode     *mode;

	if (is_Block(node))
		return;

	block = get_nodes_block(node);

	/* ignore start and end blocks */
	ir_graph *const irg = get_Block_irg(block);
	if (block == get_irg_start_block(irg) || block == get_irg_end_block(irg))
		return;

	/* ignore Phi nodes */
	if (is_Phi(node))
		return;

	if (dag_env->options & FIRMSTAT_ARGS_ARE_ROOTS && is_arg(node))
		return;

	mode = get_irn_mode(node);
	if (mode == mode_X || mode == mode_M) {
		/* do NOT count mode_X and mode_M nodes */
		return;
	}

	/* if this option is set, Loads are always leaves */
	if (dag_env->options & FIRMSTAT_LOAD_IS_LEAVE && is_Load(node))
		return;

	if (dag_env->options & FIRMSTAT_CALL_IS_LEAVE && is_Call(node))
		return;

	entry = get_irn_dag_entry(node);

	if (! entry) {
		/* found an unassigned node, maybe a new root */
		entry = new_dag_entry(dag_env, node);
	}

	/* put the predecessors into the same DAG as the current */
	for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
		ir_node *prev = get_irn_n(node, i);
		ir_mode *mode = get_irn_mode(prev);

		if (is_Phi(prev))
			continue;

		if (mode == mode_X || mode == mode_M)
			continue;

		/*
		 * Copy constants into the DAGs if requested.
		 * Beware: do NOT add a link, as this would result in
		 * wrong intersections.
		 */
		if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
			if (is_irn_constlike(prev)) {
				++entry->num_nodes;
				++entry->num_inner_nodes;
			}
		}

		/* only nodes from the same block go into the DAG */
		if (get_nodes_block(prev) == block) {
			dag_entry_t *prev_entry = get_irn_dag_entry(prev);

			if (! prev_entry) {
				/* unassigned node, put it into the same DAG */
				set_irn_dag_entry(prev, entry);
				++entry->num_nodes;
				++entry->num_inner_nodes;
			} else {
				if (prev_entry == entry) {
					/* We found a node that is already assigned to this DAG.
					   This DAG is not a tree. */
					entry->is_tree = 0;
				} else {
					/* two DAGs intersect: copy the data to one of them
					   and kill the other */
					entry->num_roots       += prev_entry->num_roots;
					entry->num_nodes       += prev_entry->num_nodes;
					entry->num_inner_nodes += prev_entry->num_inner_nodes;
					entry->is_tree         &= prev_entry->is_tree;

					--dag_env->num_of_dags;

					prev_entry->is_dead = 1;
					prev_entry->link    = entry;
				}
			}
		}
	}
}
Example #12
/**
 * Post-walker to detect DAG roots that are referenced from other blocks.
 */
static void find_dag_roots(ir_node *node, void *env)
{
	dag_env_t   *dag_env = (dag_env_t*)env;
	int         i, arity;
	dag_entry_t *entry;
	ir_node     *block;

	if (is_Block(node))
		return;

	block = get_nodes_block(node);

	/* ignore start and end blocks */
	ir_graph *const irg = get_Block_irg(block);
	if (block == get_irg_start_block(irg) || block == get_irg_end_block(irg))
		return;

	/* Phi nodes always reference nodes from "other" blocks */
	if (is_Phi(node)) {
		if (get_irn_mode(node) != mode_M) {
			for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
				ir_node *prev = get_irn_n(node, i);

				if (is_Phi(prev))
					continue;

				if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
					if (is_irn_constlike(prev))
						continue;
				}

				entry = get_irn_dag_entry(prev);

				if (! entry) {
					/* found an unassigned node, a new root */
					entry = new_dag_entry(dag_env, node);
					entry->is_ext_ref = 1;
				}
			}
		}
	} else {
		for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
			ir_node *prev = get_irn_n(node, i);
			ir_mode *mode = get_irn_mode(prev);

			if (mode == mode_X || mode == mode_M)
				continue;

			if (is_Phi(prev))
				continue;

			if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
				if (is_irn_constlike(prev))
					continue;
			}

			if (get_nodes_block(prev) != block) {
				/* The predecessor is from another block. It forms
				   a root. */
				entry = get_irn_dag_entry(prev);
				if (! entry) {
					/* found an unassigned node, a new root */
					entry = new_dag_entry(dag_env, node);
					entry->is_ext_ref = 1;
				}
			}
		}
	}
}
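The two statistics walkers above might be driven as two passes over the same graph: connect_dags first builds and merges the DAG entries, find_dag_roots afterwards marks roots referenced from other blocks. A hedged sketch; count_dags_in is a hypothetical driver and the dag_env_t setup is omitted:

/* Two walks: grow and merge the DAGs, then detect externally referenced
 * roots. */
static void count_dags_in(ir_graph *irg, dag_env_t *dag_env)
{
	irg_walk_graph(irg, connect_dags, NULL, dag_env);
	irg_walk_graph(irg, NULL, find_dag_roots, dag_env);
}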