Example 1
int has_irg_caller_backedge(const ir_graph *irg)
{
	if (irg->caller_isbe != NULL) {
		for (size_t i = 0, n_callers = get_irg_n_callers(irg);
		     i < n_callers; ++i)
			if (rbitset_is_set(irg->caller_isbe, i))
				return 1;
	}
	return 0;
}
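For orientation: the rbitset_* calls in these examples operate on a raw bitset, i.e. a flat array of unsigned words, and a single-bit query such as the one in has_irg_caller_backedge reduces to a word index plus a bit mask. The following is a minimal standalone sketch of that idea; BITS_PER_ELEM and bit_is_set here are illustrative stand-ins, not the actual raw_bitset.h implementation.

#include <assert.h>
#include <limits.h>  /* CHAR_BIT */
#include <stddef.h>  /* size_t */

/* a raw bitset is a flat array of 'unsigned'; each word stores this many bits */
#define BITS_PER_ELEM (sizeof(unsigned) * CHAR_BIT)

/* illustrative single-bit test: pick the word, shift the bit down, mask it */
static int bit_is_set(const unsigned *bitset, size_t pos)
{
	return (bitset[pos / BITS_PER_ELEM] >> (pos % BITS_PER_ELEM)) & 1u;
}

int main(void)
{
	/* room for 66 bits, as in the test program of Example 6 */
	unsigned words[(66 + BITS_PER_ELEM - 1) / BITS_PER_ELEM] = { 0 };

	words[40 / BITS_PER_ELEM] |= 1u << (40 % BITS_PER_ELEM); /* set bit 40 */
	assert(bit_is_set(words, 40));
	assert(!bit_is_set(words, 41));
	return 0;
}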
Example 2
bool arch_reg_is_allocatable(const arch_register_req_t *req,
                             const arch_register_t *reg)
{
	if (req->cls != reg->cls)
		return false;
	if (reg->is_virtual)
		return true;
	if (req->limited != NULL)
		return rbitset_is_set(req->limited, reg->index);
	return true;
}
Example 3
static unsigned get_start_reg_index(ir_graph *irg, const arch_register_t *reg)
{
	/* do a naive linear search... */
	ir_node *start  = get_irg_start(irg);
	be_foreach_out(start, i) {
		arch_register_req_t const *const out_req
			= arch_get_irn_register_req_out(start, i);
		if (out_req->limited == NULL)
			continue;
		if (out_req->cls != reg->cls)
			continue;
		if (!rbitset_is_set(out_req->limited, reg->index))
			continue;
		return i;
	}
	/* the register was not found among the constrained Start outputs */
	panic("Tried querying undefined register '%s' at Start", reg->name);
}
Example 4
int is_irg_caller_backedge(const ir_graph *irg, size_t pos)
{
	assert(pos < get_irg_n_callers(irg));
	return irg->caller_isbe != NULL ? rbitset_is_set(irg->caller_isbe, pos) : 0;
}
Example 5
/*
 * Normalize the Returns of a graph by creating a new End block with a
 * single Return whose operands are Phis of the original return values.
 * This is the preferred input for if-conversion.
 *
 * In pseudocode, it means:
 *
 * if (a)
 *   return b;
 * else
 *   return c;
 *
 * is transformed into
 *
 * if (a)
 *   res = b;
 * else
 *   res = c;
 * return res;
 */
void normalize_one_return(ir_graph *irg)
{
    ir_node   *endbl         = get_irg_end_block(irg);
    ir_entity *entity        = get_irg_entity(irg);
    ir_type   *type          = get_entity_type(entity);
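    /* +1: besides the results, a Return also carries the memory value
     * (cf. the in[0] / &in[1] split when the new Return is built below) */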
    int        n_ret_vals    = get_method_n_ress(type) + 1;
    int        n_rets        = 0;
    bool       filter_dbgi   = false;
    dbg_info  *combined_dbgi = NULL;
    int i, j, k, n, last_idx;
    ir_node **in, **retvals, **endbl_in;
    ir_node *block;

    /* check whether the graph has more than one Return */
    n = get_Block_n_cfgpreds(endbl);
    if (n <= 0) {
        /* The end block has no predecessors: the graph contains an endless
           loop, so no Return exists. */
        confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
        return;
    }

    unsigned *const returns = rbitset_alloca(n);
    for (i = 0; i < n; ++i) {
        ir_node *node = get_Block_cfgpred(endbl, i);

        if (is_Return(node)) {
            dbg_info *dbgi = get_irn_dbg_info(node);

            if (dbgi != NULL && dbgi != combined_dbgi) {
                if (filter_dbgi) {
                    combined_dbgi = NULL;
                } else {
                    combined_dbgi = dbgi;
                    filter_dbgi   = true;
                }
            }

            ++n_rets;
            rbitset_set(returns, i);
        }
    }

    if (n_rets <= 1) {
        confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
        return;
    }

    in       = ALLOCAN(ir_node*, MAX(n_rets, n_ret_vals));
    retvals  = ALLOCAN(ir_node*, n_rets * n_ret_vals);
    endbl_in = ALLOCAN(ir_node*, n);

    last_idx = 0;
    for (j = i = 0; i < n; ++i) {
        ir_node *ret = get_Block_cfgpred(endbl, i);

        if (rbitset_is_set(returns, i)) {
            ir_node *block = get_nodes_block(ret);

            /* create a new Jmp for every Return and collect it in 'in' */
            in[j] = new_r_Jmp(block);

            /* save the return values, transposed so that the n_rets inputs
             * for result k end up contiguous at &retvals[k * n_rets] */
            for (k = 0; k < n_ret_vals; ++k)
                retvals[j + k*n_rets] = get_irn_n(ret, k);

            ++j;
        } else {
            endbl_in[last_idx++] = ret;
        }
    }

    /* create a new block joined by all of the newly created Jmps */
    block = new_r_Block(irg, n_rets, in);

    /* now create the Phi nodes */
    for (j = i = 0; i < n_ret_vals; ++i, j += n_rets) {
        ir_mode *mode = get_irn_mode(retvals[j]);
        in[i] = new_r_Phi(block, n_rets, &retvals[j], mode);
    }

    endbl_in[last_idx++] = new_rd_Return(combined_dbgi, block, in[0], n_ret_vals-1, &in[1]);

    set_irn_in(endbl, last_idx, endbl_in);

    /* invalidate analysis information:
     * a new Block was added, so dominator, outs and loop are inconsistent,
     * trouts and callee-state should still be valid */
    confirm_irg_properties(irg,
                           IR_GRAPH_PROPERTY_NO_BADS
                           | IR_GRAPH_PROPERTY_NO_TUPLES
                           | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
                           | IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
                           | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
    add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
}
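The retvals packing above is the one subtle step: value k of Return j is stored at retvals[j + k*n_rets], so the n_rets inputs of the Phi for result k end up contiguous at &retvals[k*n_rets], which is exactly the slice passed to new_r_Phi. Below is a minimal standalone sketch of that transposition using plain ints instead of ir_node pointers; the names ret_vals_of, n_rets and n_ret_vals are illustrative only.

#include <assert.h>

int main(void)
{
	/* two Returns (j = 0..1), each carrying three values (k = 0..2) */
	enum { n_rets = 2, n_ret_vals = 3 };
	const int ret_vals_of[n_rets][n_ret_vals] = {
		{ 10, 11, 12 },
		{ 20, 21, 22 },
	};

	/* column-major packing as in normalize_one_return:
	 * value k of Return j goes to retvals[j + k*n_rets] */
	int retvals[n_rets * n_ret_vals];
	for (int j = 0; j < n_rets; ++j)
		for (int k = 0; k < n_ret_vals; ++k)
			retvals[j + k*n_rets] = ret_vals_of[j][k];

	/* &retvals[k*n_rets] now holds all inputs for the Phi of result k */
	assert(retvals[0*n_rets + 0] == 10 && retvals[0*n_rets + 1] == 20);
	assert(retvals[1*n_rets + 0] == 11 && retvals[1*n_rets + 1] == 21);
	assert(retvals[2*n_rets + 0] == 12 && retvals[2*n_rets + 1] == 22);
	return 0;
}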
Example 6
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "adt/raw_bitset.h" /* libFirm raw bitset API (rbitset_*) */

int main(void)
{
	unsigned *field0 = rbitset_malloc(66);
	unsigned *field1 = rbitset_alloca(66);
	unsigned field2[BITSET_SIZE_ELEMS(66)];
	memset(&field2, 0, sizeof(field2));

	assert(rbitset_is_empty(field0, 66));

	rbitset_set(field0, 14);
	assert(!rbitset_is_empty(field0, 66));
	rbitset_set_all(field1, 66);
	rbitset_clear(field1, 14);
	rbitset_flip_all(field1, 66);
	assert(rbitsets_equal(field0, field1, 66));
	assert(rbitset_is_set(field1, 14));
	assert(!rbitset_is_set(field1, 15));
	assert(!rbitset_is_set(field1, 44));

	rbitset_set_range(field2, 23, 55, true);
	rbitset_set_all(field0, 66);
	rbitset_set_range(field0, 0, 23, false);
	rbitset_set_range(field0, 54, 66, false);
	rbitset_flip(field0, 54);
	assert(rbitsets_equal(field2, field0, 66));
	rbitset_flip(field2, 13);
	rbitset_flip(field2, 64);
	assert(rbitset_popcount(field2, 66) == 34);

	rbitset_clear_all(field1, 66);
	assert(rbitset_is_empty(field1, 66));
	rbitset_set(field1, 3);
	rbitset_set(field1, 59);
	assert(rbitset_next(field1, 0, true) == 3);
	assert(rbitset_next(field1, 3, true) == 3);
	assert(rbitset_next(field1, 4, true) == 59);
	assert(rbitset_next(field1, 34, true) == 59);
	assert(rbitset_next(field1, 0, false) == 0);
	assert(rbitset_next(field1, 3, false) == 4);
	assert(rbitset_next_max(field1, 3, 66, false) == 4);
	assert(rbitset_next_max(field1, 60, 66, true) == (size_t)-1);
	assert(rbitset_next_max(field1, 3, 4, false) == (size_t)-1);
	assert(rbitset_prev(field1, 0, true) == (size_t)-1);
	assert(rbitset_prev(field1, 3, true) == (size_t)-1);
	assert(rbitset_prev(field1, 4, true) == 3);
	assert(rbitset_prev(field1, 59, true) == 3);
	assert(rbitset_prev(field1, 60, true) == 59);
	assert(rbitset_prev(field1, 34, true) == 3);
	assert(rbitset_prev(field1, 0, false) == (size_t)-1);
	assert(rbitset_prev(field1, 3, false) == 2);
	assert(rbitset_prev(field1, 1, false) == 0);

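	/* all rbitset operations must tolerate a size of 0 and a NULL bitset */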
	unsigned *null = (unsigned*)0;
	rbitset_flip_all(null, 0);
	rbitset_set_all(null, 0);
	rbitset_flip_all(null, 0);
	rbitset_minus1(null, 0);
	rbitset_copy(null, NULL, 0);
	rbitset_xor(null, 0, 0);
	rbitset_and(null, 0, 0);
	rbitset_or(null, 0, 0);
	rbitset_andnot(null, 0, 0);
	assert(rbitsets_equal(null, NULL, 0));
	assert(rbitset_contains(null, NULL, 0));
	assert(!rbitsets_have_common(null, NULL, 0));
	assert(rbitset_next_max(null, 0, 0, true) == (size_t)-1);
	assert(rbitset_next_max(null, 0, 0, false) == (size_t)-1);
	assert(rbitset_popcount(null, 0) == 0);
	assert(rbitset_is_empty(null, 0));

	return 0;
}