Example #1
void lower_CopyB(ir_graph *irg, unsigned max_small_sz, unsigned min_large_sz,
                 int allow_misaligns)
{
	const backend_params *bparams = be_get_backend_param();

	assert(max_small_sz < min_large_sz && "CopyB size ranges must not overlap");

	max_small_size      = max_small_sz;
	min_large_size      = min_large_sz;
	native_mode_bytes   = bparams->machine_size / 8;
	allow_misalignments = allow_misaligns;

	walk_env_t env = { .copybs = NEW_ARR_F(ir_node*, 0) };
	irg_walk_graph(irg, NULL, find_copyb_nodes, &env);

	bool changed = false;
	for (size_t i = 0, n = ARR_LEN(env.copybs); i != n; ++i) {
		lower_copyb_node(env.copybs[i]);
		changed = true;
	}
	confirm_irg_properties(irg, changed ? IR_GRAPH_PROPERTIES_CONTROL_FLOW
	                                    : IR_GRAPH_PROPERTIES_ALL);

	DEL_ARR_F(env.copybs);
}
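
For context, a minimal driver sketch (not taken from libFirm itself): it assumes the program-level iterators get_irp_n_irgs()/get_irp_irg(), which are part of the libFirm API, and uses purely illustrative threshold values; real backends derive them from the target.

#include <libfirm/firm.h>

/* Hypothetical driver: lower the CopyB nodes of every graph in the program.
 * The 16/128 thresholds and the misalignment flag are illustrative values,
 * not prescribed by the API. */
static void lower_all_copyb(void)
{
	for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i) {
		lower_CopyB(get_irp_irg(i), /*max_small_sz=*/16,
		            /*min_large_sz=*/128, /*allow_misaligns=*/1);
	}
}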
Example #2
void lower_alloc(ir_graph *irg, unsigned new_po2_stack_alignment)
{
	if (new_po2_stack_alignment == 0)
		return;

	po2_stack_alignment = new_po2_stack_alignment;
	bool changed = false;
	irg_walk_graph(irg, NULL, lower_node, &changed);

	confirm_irg_properties(irg, changed ? IR_GRAPH_PROPERTIES_CONTROL_FLOW
	                                    : IR_GRAPH_PROPERTIES_ALL);
}
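
A hedged call sketch: judging from the parameter name ("po2") and the early return on 0, the argument appears to be the power-of-two exponent of the desired stack alignment; the value 4 (i.e. 16 bytes) below is purely illustrative, and the wrapper function is hypothetical.

#include <libfirm/firm.h>

/* Hypothetical call: request 2^4 = 16-byte stack alignment for the Allocs
 * of one graph.  The exponent 4 is an illustrative, target-dependent value. */
static void align_allocs(ir_graph *irg)
{
	lower_alloc(irg, /*new_po2_stack_alignment=*/4);
}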
Example #3
void free_ir_graph(ir_graph *irg)
{
	assert(irg->kind == k_ir_graph);

	remove_irp_irg(irg);
	confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);

	free_irg_outs(irg);
	del_identities(irg);
	if (irg->ent) {
		set_entity_irg(irg->ent, NULL);  /* not set in const code irg */
	}

	free_End(get_irg_end(irg));
	obstack_free(&irg->obst, NULL);
	free(irg->loc_descriptions);  /* free(NULL) is a no-op, no check needed */
	irg->kind = k_BAD;
	free_graph(irg);
}
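
A hedged teardown sketch: because free_ir_graph() calls remove_irp_irg() itself, a caller releasing every graph of the program should iterate backwards so the remaining indices stay valid. The driver function is hypothetical; the iterators are the same libFirm calls as in the earlier sketch.

#include <libfirm/firm.h>

/* Hypothetical teardown: release every graph registered with the program.
 * free_ir_graph() removes the graph from irp itself (see remove_irp_irg
 * above), so we walk the list backwards to keep the indices valid. */
static void free_all_graphs(void)
{
	for (size_t i = get_irp_n_irgs(); i-- > 0; ) {
		free_ir_graph(get_irp_irg(i));
	}
}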
Example #4
void remove_tuples(ir_graph *irg)
{
	bool changed = false;
	irg_walk_graph(irg, exchange_tuple_projs, NULL, &changed);

	/* remove Tuples only held by keep-alive edges */
	ir_node *end = get_irg_end(irg);
	for (int i = get_End_n_keepalives(end); i-- > 0; ) {
		ir_node *irn = get_End_keepalive(end, i);
		if (is_Tuple(irn)) {
			remove_End_n(end, i);
			changed = true;
		}
	}

	confirm_irg_properties(irg, changed
			? IR_GRAPH_PROPERTIES_CONTROL_FLOW | IR_GRAPH_PROPERTY_ONE_RETURN
			  | IR_GRAPH_PROPERTY_MANY_RETURNS | IR_GRAPH_PROPERTY_NO_BADS
			: IR_GRAPH_PROPERTIES_ALL);
	add_irg_properties(irg, IR_GRAPH_PROPERTY_NO_TUPLES);
}
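
The examples so far share the same skeleton: walk the graph, track a changed flag, then report which properties survived. Below is a minimal hedged template of that pattern; my_transform and my_pass are placeholders, not libFirm functions.

#include <stdbool.h>
#include <libfirm/firm.h>

/* Hypothetical walker callback: rewrites a node and records whether
 * anything changed via the env pointer. */
static void my_transform(ir_node *node, void *env)
{
	bool *changed = (bool*)env;
	(void)node;
	/* ... rewrite the node and set *changed = true when it was modified ... */
	(void)changed;
}

/* Hypothetical pass skeleton: walk, track a changed flag, then keep either
 * all properties or only the conservative control-flow subset. */
static void my_pass(ir_graph *irg)
{
	bool changed = false;
	irg_walk_graph(irg, NULL, my_transform, &changed);
	confirm_irg_properties(irg, changed ? IR_GRAPH_PROPERTIES_CONTROL_FLOW
	                                    : IR_GRAPH_PROPERTIES_ALL);
}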
Example #5
/*
 * Optimize the frame type of an irg by removing
 * never touched entities.
 */
void opt_frame_irg(ir_graph *irg)
{
	ir_type   *frame_tp = get_irg_frame_type(irg);
	ir_entity *ent, *list;
	ir_node   *frame, *sel;
	size_t    i, n = get_class_n_members(frame_tp);
	int       o;

	if (n == 0)
		return;

	assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);

	irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);

	/* clear all entity links */
	for (i = n; i > 0;) {
		ent = get_class_member(frame_tp, --i);
		set_entity_link(ent, NULL);
	}

	/* look for uses */
	frame = get_irg_frame(irg);

	/* mark all used entities */
	for (o = get_irn_n_outs(frame) - 1; o >= 0; --o) {
		sel = get_irn_out(frame, o);
		if (is_Sel(sel)) {
			ent = get_Sel_entity(sel);
			/* only entities on the frame */
			if (get_entity_owner(ent) == frame_tp)
				set_entity_link(ent, ent);
		}
	}

	/* link unused ones */
	list = NULL;
	for (i = n; i > 0;) {
		ent = get_class_member(frame_tp, --i);
		/* beware of inner functions: those are NOT unused */
		if (get_entity_link(ent) == NULL && !is_method_entity(ent)) {
			set_entity_link(ent, list);
			list = ent;
		}
	}

	if (list != NULL) {
		/* delete list members */
		for (ent = list; ent; ent = list) {
			list = (ir_entity*)get_entity_link(ent);
			free_entity(ent);
		}
		/* we changed the frame type, its layout should be redefined */
		set_type_state(frame_tp, layout_undefined);
	}
	irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);

	/* we changed the type, this affects none of the currently known graph
	 * properties, but I don't use ALL because I don't know if someone adds
	 * type-based properties at some point */
	confirm_irg_properties(irg,
		IR_GRAPH_PROPERTIES_CONTROL_FLOW
		| IR_GRAPH_PROPERTY_NO_BADS
		| IR_GRAPH_PROPERTY_NO_TUPLES
		| IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
		| IR_GRAPH_PROPERTY_CONSISTENT_OUTS
		| IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE
		| IR_GRAPH_PROPERTY_MANY_RETURNS);
}
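
One detail worth isolating from this example is the resource-reservation discipline around the entity link field. Below is a hedged sketch of just that pattern, using only calls that appear above; the function itself is hypothetical.

#include <libfirm/firm.h>

/* Hypothetical sketch of the pattern above: the entity link is a shared
 * scratch field, so a pass reserves it, clears it for the entities it cares
 * about, uses it as temporary per-entity storage, and releases it again. */
static void scan_frame_entities(ir_type *frame_tp)
{
	irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);

	for (size_t i = get_class_n_members(frame_tp); i-- > 0; ) {
		set_entity_link(get_class_member(frame_tp, i), NULL);
	}
	/* ... mark entities by storing data in their links, then read it back ... */

	irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);
}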
Example #6
/*
 * Normalize the Returns of a graph by creating a new End block
 * with a single Return(Phi).
 * This is the preferred input for if-conversion.
 *
 * In pseudocode, it means:
 *
 * if (a)
 *   return b;
 * else
 *   return c;
 *
 * is transformed into
 *
 * if (a)
 *   res = b;
 * else
 *   res = c;
 * return res;
 */
void normalize_one_return(ir_graph *irg)
{
    ir_node   *endbl         = get_irg_end_block(irg);
    ir_entity *entity        = get_irg_entity(irg);
    ir_type   *type          = get_entity_type(entity);
    int        n_ret_vals    = get_method_n_ress(type) + 1;
    int        n_rets        = 0;
    bool       filter_dbgi   = false;
    dbg_info  *combined_dbgi = NULL;
    int i, j, k, n, last_idx;
    ir_node **in, **retvals, **endbl_in;
    ir_node *block;

    /* check whether there is more than one Return */
    n = get_Block_n_cfgpreds(endbl);
    if (n <= 0) {
        /* The end block has no predecessors: the graph contains an
           endless loop, so there is no Return at all. */
        confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
        return;
    }

    unsigned *const returns = rbitset_alloca(n);
    for (i = 0; i < n; ++i) {
        ir_node *node = get_Block_cfgpred(endbl, i);

        if (is_Return(node)) {
            dbg_info *dbgi = get_irn_dbg_info(node);

            if (dbgi != NULL && dbgi != combined_dbgi) {
                if (filter_dbgi) {
                    combined_dbgi = NULL;
                } else {
                    combined_dbgi = dbgi;
                    filter_dbgi   = true;
                }
            }

            ++n_rets;
            rbitset_set(returns, i);
        }
    }

    if (n_rets <= 1) {
        confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
        return;
    }

    in       = ALLOCAN(ir_node*, MAX(n_rets, n_ret_vals));
    retvals  = ALLOCAN(ir_node*, n_rets * n_ret_vals);
    endbl_in = ALLOCAN(ir_node*, n);

    last_idx = 0;
    for (j = i = 0; i < n; ++i) {
        ir_node *ret = get_Block_cfgpred(endbl, i);

        if (rbitset_is_set(returns, i)) {
            ir_node *block = get_nodes_block(ret);

            /* create a new Jmp for every Return and put it into the in[] array */
            in[j] = new_r_Jmp(block);

            /* save the return values and shuffle them */
            for (k = 0; k < n_ret_vals; ++k)
                retvals[j + k*n_rets] = get_irn_n(ret, k);

            ++j;
        } else {
            endbl_in[last_idx++] = ret;
        }
    }

    /* ok, create a new block with all created in's */
    block = new_r_Block(irg, n_rets, in);

    /* now create the Phi nodes */
    for (j = i = 0; i < n_ret_vals; ++i, j += n_rets) {
        ir_mode *mode = get_irn_mode(retvals[j]);
        in[i] = new_r_Phi(block, n_rets, &retvals[j], mode);
    }

    endbl_in[last_idx++] = new_rd_Return(combined_dbgi, block, in[0], n_ret_vals-1, &in[1]);

    set_irn_in(endbl, last_idx, endbl_in);

    /* invalidate analysis information:
     * a new Block was added, so dominator, outs and loop are inconsistent,
     * trouts and callee-state should still be valid */
    confirm_irg_properties(irg,
                           IR_GRAPH_PROPERTY_NO_BADS
                           | IR_GRAPH_PROPERTY_NO_TUPLES
                           | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
                           | IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
                           | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
    add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
}
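
To make the pseudocode in the header comment concrete, here is a hedged source-level before/after pair; the two C functions are hypothetical inputs, not taken from libFirm's test suite.

/* Source-level illustration of the effect described above: the two Returns
 * of before() are funnelled through one merge value, so the graph built for
 * after() ends in a single Return fed by a Phi. */
int before(int a, int b, int c)
{
	if (a)
		return b;
	else
		return c;
}

int after(int a, int b, int c)
{
	int res;
	if (a)
		res = b;
	else
		res = c;
	return res;  /* the single Return the pass creates */
}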