示例#1
0
/* Initialize the default caller-save and callee-save register bitsets
 * for the ia32 calling convention. */
void ia32_cconv_init(void)
{
	size_t i;

	for (i = 0; i != ARRAY_SIZE(caller_saves); ++i)
		rbitset_set(default_caller_saves, caller_saves[i]);
	for (i = 0; i != ARRAY_SIZE(callee_saves); ++i)
		rbitset_set(default_callee_saves, callee_saves[i]);
}
示例#2
0
/* Set up the sparc calling-convention bitsets and build the aligned
 * multi-register requirements used for double/quad float results. */
void sparc_cconv_init(void)
{
	/* registers clobbered by an ordinary call */
	for (size_t r = 0; r != ARRAY_SIZE(caller_saves); ++r)
		rbitset_set(default_caller_saves, caller_saves[r]);

	/* a returns_twice call preserves everything except the registers in
	 * returns_twice_saved and the ignore registers */
	rbitset_set_all(default_returns_twice_saves, N_SPARC_REGISTERS);
	for (size_t r = 0; r != ARRAY_SIZE(returns_twice_saved); ++r)
		rbitset_clear(default_returns_twice_saves, returns_twice_saved[r]);
	for (size_t r = 0; r != ARRAY_SIZE(ignore_regs); ++r)
		rbitset_clear(default_returns_twice_saves, ignore_regs[r]);

	/* double results occupy an aligned pair of float registers */
	for (size_t r = 0; r < ARRAY_SIZE(float_result_reqs_double); r += 2) {
		arch_register_req_t *const dreq = &float_result_reqs_double[r];
		*dreq = *float_result_regs[r]->single_req;
		dreq->type |= arch_register_req_type_aligned;
		dreq->width = 2;
	}
	/* quad results occupy an aligned quadruple of float registers */
	for (size_t r = 0; r < ARRAY_SIZE(float_result_reqs_quad); r += 4) {
		arch_register_req_t *const qreq = &float_result_reqs_quad[r];
		*qreq = *float_result_regs[r]->single_req;
		qreq->type |= arch_register_req_type_aligned;
		qreq->width = 4;
	}
}
示例#3
0
/**
 * Mark the callee at position pos as a backedge.
 */
/**
 * Mark the callee at position pos as a backedge.
 */
static void set_irg_callee_backedge(ir_graph *irg, size_t pos)
{
	size_t const n_callees = get_irg_n_callees(irg);

	/* the backedge bitset is created lazily on first use */
	if (irg->callee_isbe == NULL)
		irg->callee_isbe = rbitset_malloc(n_callees);

	assert(pos < n_callees);
	rbitset_set(irg->callee_isbe, pos);
}
示例#4
0
/* Populate the default caller/callee save sets for ia32; the float
 * registers are only added when hardware floats are in use. */
void ia32_cconv_init(void)
{
	be_cconv_add_regs(default_caller_saves, caller_saves_gp,
	                  ARRAY_SIZE(caller_saves_gp));
	be_cconv_add_regs(default_callee_saves, callee_saves,
	                  ARRAY_SIZE(callee_saves));

	if (ia32_cg_config.use_softfloat)
		return;

	/* hardware floats: fp registers are caller save, and the FPU control
	 * word must be preserved by the callee */
	be_cconv_add_regs(default_caller_saves, caller_saves_fp,
	                  ARRAY_SIZE(caller_saves_fp));
	rbitset_set(default_callee_saves, REG_FPCW);
}
示例#5
0
/** Search the caller in the list of all callers and set its backedge property. */
static void set_irg_caller_backedge(ir_graph *irg, const ir_graph *caller)
{
	/* allocate a new array on demand */
	size_t n_callers = get_irg_n_callers(irg);
	if (irg->caller_isbe == NULL)
		irg->caller_isbe = rbitset_malloc(n_callers);
	for (size_t i = 0; i < n_callers; ++i) {
		if (get_irg_caller(irg, i) == caller) {
			rbitset_set(irg->caller_isbe, i);
			break;
		}
	}
}
示例#6
0
const arch_register_req_t *be_create_reg_req(struct obstack *obst,
                                             const arch_register_t *reg,
                                             bool ignore)
{
	arch_register_class_t const *cls     = reg->cls;
	unsigned                    *limited
		= rbitset_obstack_alloc(obst, cls->n_regs);
	rbitset_set(limited, reg->index);
	arch_register_req_t *req = OALLOCZ(obst, arch_register_req_t);
	req->cls     = cls;
	req->limited = limited;
	req->width   = 1;
	req->ignore  = ignore;
	return req;
}
示例#7
0
/* Return a requirement pinning exactly one register; reuses the register's
 * precomputed single_req unless the ignore flag forces a fresh one. */
arch_register_req_t const *be_create_reg_req(ir_graph *const irg, arch_register_t const *const reg, bool const ignore)
{
	/* the common non-ignore case is cached on the register itself */
	if (!ignore)
		return reg->single_req;

	struct obstack              *const obst = be_get_be_obst(irg);
	arch_register_class_t const *const cls  = reg->cls;

	/* limited set containing only the requested register */
	unsigned *const bits = rbitset_obstack_alloc(obst, cls->n_regs);
	rbitset_set(bits, reg->index);

	arch_register_req_t *const res = OALLOCZ(obst, arch_register_req_t);
	res->cls     = cls;
	res->limited = bits;
	res->width   = 1;
	res->ignore  = ignore;
	return res;
}
示例#8
0
/**
 * Backend entry point: select, schedule, register-allocate and emit all
 * graphs of the program as arm assembly.
 *
 * @param output    stream the assembly is written to
 * @param cup_name  name of the compilation unit (forwarded to be_begin)
 */
static void arm_generate_code(FILE *output, const char *cup_name)
{
	/* GAS for arm uses '%' as the ELF type character and no type info */
	be_gas_emit_types = false;
	be_gas_elf_type_char = '%';

	be_begin(output, cup_name);
	/* the stack pointer is excluded from SSA construction; this stack-local
	 * bitset is shared by every graph processed in the loop below */
	unsigned *const sp_is_non_ssa = rbitset_alloca(N_ARM_REGISTERS);
	rbitset_set(sp_is_non_ssa, REG_SP);

	arm_emit_file_prologue();

	foreach_irp_irg(i, irg) {
		if (!be_step_first(irg))
			continue;

		/* attach backend-private per-graph data */
		struct obstack *obst = be_get_be_obst(irg);
		be_birg_from_irg(irg)->isa_link = OALLOCZ(obst, arm_irg_data_t);

		be_birg_from_irg(irg)->non_ssa_regs = sp_is_non_ssa;
		arm_select_instructions(irg);

		be_step_schedule(irg);

		/* flags cannot be kept alive across scheduling barriers */
		be_timer_push(T_RA_PREPARATION);
		be_sched_fix_flags(irg, &arm_reg_classes[CLASS_arm_flags], NULL, NULL, NULL);
		be_timer_pop(T_RA_PREPARATION);

		be_step_regalloc(irg, &arm_regalloc_if);

		be_timer_push(T_EMIT);
		arm_finish_graph(irg);
		arm_emit_function(irg);
		be_timer_pop(T_EMIT);

		be_step_last(irg);
	}

	be_finish();
}
示例#9
0
File: return.c  Project: qznc/libfirm
/*
 * Normalize the Returns of a graph by creating a new End block
 * with One Return(Phi).
 * This is the preferred input for the if-conversion.
 *
 * In pseudocode, it means:
 *
 * if (a)
 *   return b;
 * else
 *   return c;
 *
 * is transformed into
 *
 * if (a)
 *   res = b;
 * else
 *   res = c;
 * return res;
 */
void normalize_one_return(ir_graph *irg)
{
    ir_node   *endbl         = get_irg_end_block(irg);
    ir_entity *entity        = get_irg_entity(irg);
    ir_type   *type          = get_entity_type(entity);
    /* a Return has the memory plus one value per method result */
    int        n_ret_vals    = get_method_n_ress(type) + 1;
    int        n_rets        = 0;
    bool       filter_dbgi   = false;
    /* debug info for the merged Return: the common dbg_info of all
     * Returns, or NULL as soon as two of them disagree */
    dbg_info  *combined_dbgi = NULL;
    int i, j, k, n, last_idx;
    ir_node **in, **retvals, **endbl_in;
    ir_node *block;

    /* look, if we have more than one return */
    n = get_Block_n_cfgpreds(endbl);
    if (n <= 0) {
        /* The end block has no predecessors, we have an endless
           loop. In that case, no returns exists. */
        confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
        return;
    }

    /* remember which end-block predecessors are Returns */
    unsigned *const returns = rbitset_alloca(n);
    for (i = 0; i < n; ++i) {
        ir_node *node = get_Block_cfgpred(endbl, i);

        if (is_Return(node)) {
            dbg_info *dbgi = get_irn_dbg_info(node);

            if (dbgi != NULL && dbgi != combined_dbgi) {
                if (filter_dbgi) {
                    /* second distinct dbg_info seen: give up on debug info */
                    combined_dbgi = NULL;
                } else {
                    combined_dbgi = dbgi;
                    filter_dbgi   = true;
                }
            }

            ++n_rets;
            rbitset_set(returns, i);
        }
    }

    /* zero or one Return: nothing to merge */
    if (n_rets <= 1) {
        confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
        return;
    }

    /* in is reused twice: first for the new Jmps (n_rets entries), later
     * for the operands of the merged Return (n_ret_vals entries) */
    in       = ALLOCAN(ir_node*, MAX(n_rets, n_ret_vals));
    /* retvals[j + k*n_rets] holds operand k of the j-th Return */
    retvals  = ALLOCAN(ir_node*, n_rets * n_ret_vals);
    endbl_in = ALLOCAN(ir_node*, n);

    last_idx = 0;
    for (j = i = 0; i < n; ++i) {
        ir_node *ret = get_Block_cfgpred(endbl, i);

        if (rbitset_is_set(returns, i)) {
            ir_node *block = get_nodes_block(ret);

            /* create a new Jmp for every Ret and place the in in */
            in[j] = new_r_Jmp(block);

            /* save the return values and shuffle them */
            for (k = 0; k < n_ret_vals; ++k)
                retvals[j + k*n_rets] = get_irn_n(ret, k);

            ++j;
        } else {
            /* non-Return predecessors (e.g. keep-alives) stay on endbl */
            endbl_in[last_idx++] = ret;
        }
    }

    /* ok, create a new block with all created in's */
    block = new_r_Block(irg, n_rets, in);

    /* now create the Phi nodes */
    for (j = i = 0; i < n_ret_vals; ++i, j += n_rets) {
        ir_mode *mode = get_irn_mode(retvals[j]);
        in[i] = new_r_Phi(block, n_rets, &retvals[j], mode);
    }

    /* in[0] is the memory Phi, the rest are the result values */
    endbl_in[last_idx++] = new_rd_Return(combined_dbgi, block, in[0], n_ret_vals-1, &in[1]);

    set_irn_in(endbl, last_idx, endbl_in);

    /* invalidate analysis information:
     * a new Block was added, so dominator, outs and loop are inconsistent,
     * trouts and callee-state should be still valid */
    confirm_irg_properties(irg,
                           IR_GRAPH_PROPERTY_NO_BADS
                           | IR_GRAPH_PROPERTY_NO_TUPLES
                           | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
                           | IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
                           | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
    add_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);
}
示例#10
0
/* Self-test for the rbitset API: exercises allocation variants, set/clear/
 * flip operations, range operations, iteration and the zero-length
 * degenerate cases. Returns 0 on success; failures trip an assert. */
int main(void)
{
	/* three 66-bit sets via the three allocation styles: heap, stack
	 * (alloca) and a plain zeroed array */
	unsigned *field0 = rbitset_malloc(66);
	unsigned *field1 = rbitset_alloca(66);
	unsigned field2[BITSET_SIZE_ELEMS(66)];
	memset(&field2, 0, sizeof(field2));

	assert(rbitset_is_empty(field0, 66));

	/* set/clear/flip: field1 is built as the complement of a complement
	 * so it must end up equal to field0 (only bit 14 set) */
	rbitset_set(field0, 14);
	assert(!rbitset_is_empty(field0, 66));
	rbitset_set_all(field1, 66);
	rbitset_clear(field1, 14);
	rbitset_flip_all(field1, 66);
	assert(rbitsets_equal(field0, field1, 66));
	assert(rbitset_is_set(field1, 14));
	assert(!rbitset_is_set(field1, 15));
	assert(!rbitset_is_set(field1, 44));

	/* range operations: construct bits [23,55) two different ways */
	rbitset_set_range(field2, 23, 55, true);
	rbitset_set_all(field0, 66);
	rbitset_set_range(field0, 0, 23, false);
	rbitset_set_range(field0, 54, 66, false);
	rbitset_flip(field0, 54);
	assert(rbitsets_equal(field2, field0, 66));
	rbitset_flip(field2, 13);
	rbitset_flip(field2, 64);
	/* 32 bits from the range plus the two flipped ones */
	assert(rbitset_popcount(field2, 66) == 34);

	/* iteration: next/prev over a set containing exactly bits 3 and 59 */
	rbitset_clear_all(field1, 66);
	assert(rbitset_is_empty(field1, 66));
	rbitset_set(field1, 3);
	rbitset_set(field1, 59);
	assert(rbitset_next(field1, 0, true) == 3);
	assert(rbitset_next(field1, 3, true) == 3);
	assert(rbitset_next(field1, 4, true) == 59);
	assert(rbitset_next(field1, 34, true) == 59);
	assert(rbitset_next(field1, 0, false) == 0);
	assert(rbitset_next(field1, 3, false) == 4);
	assert(rbitset_next_max(field1, 3, 66, false) == 4);
	assert(rbitset_next_max(field1, 60, 66, true) == (size_t)-1);
	assert(rbitset_next_max(field1, 3, 4, false) == (size_t)-1);
	assert(rbitset_prev(field1, 0, true) == (size_t)-1);
	assert(rbitset_prev(field1, 3, true) == (size_t)-1);
	assert(rbitset_prev(field1, 4, true) == 3);
	assert(rbitset_prev(field1, 59, true) == 3);
	assert(rbitset_prev(field1, 60, true) == 59);
	assert(rbitset_prev(field1, 34, true) == 3);
	assert(rbitset_prev(field1, 0, false) == (size_t)-1);
	assert(rbitset_prev(field1, 3, false) == 2);
	assert(rbitset_prev(field1, 1, false) == 0);

	/* degenerate case: every operation must accept a NULL set of size 0 */
	unsigned *null = (unsigned*)0;
	rbitset_flip_all(null, 0);
	rbitset_set_all(null, 0);
	rbitset_flip_all(null, 0);
	rbitset_minus1(null, 0);
	rbitset_copy(null, NULL, 0);
	rbitset_xor(null, 0, 0);
	rbitset_and(null, 0, 0);
	rbitset_or(null, 0, 0);
	rbitset_andnot(null, 0, 0);
	assert(rbitsets_equal(null, NULL, 0));
	assert(rbitset_contains(null, NULL, 0));
	assert(!rbitsets_have_common(null, NULL, 0));
	assert(rbitset_next_max(null, 0, 0, true) == (size_t)-1);
	assert(rbitset_next_max(null, 0, 0, false) == (size_t)-1);
	assert(rbitset_popcount(null, 0) == 0);
	assert(rbitset_is_empty(null, 0));

	return 0;
}
示例#11
0
/* Populate the amd64 calling-convention data, honoring the difference
 * between the SysV ABI and the x64 (Windows) ABI. */
void amd64_cconv_init(void)
{
	static const unsigned common_caller_saves[] = {
		REG_RAX,   REG_RCX,   REG_RDX,   REG_R8,    REG_R9,    REG_R10,
		REG_R11,   REG_XMM0,  REG_XMM1,  REG_XMM2,  REG_XMM3,  REG_XMM4,
		REG_XMM5,  REG_XMM6,  REG_XMM7,  REG_XMM8,  REG_XMM9,  REG_XMM10,
		REG_XMM11, REG_XMM12, REG_XMM13, REG_XMM14, REG_XMM15,
	};
	static const unsigned common_callee_saves[] = {
		REG_RBX, REG_RBP, REG_R12, REG_R13, REG_R14, REG_R15,
	};
	static const arch_register_t* const param_regs_list[] = {
		&amd64_registers[REG_RDI],
		&amd64_registers[REG_RSI],
		&amd64_registers[REG_RDX],
		&amd64_registers[REG_RCX],
		&amd64_registers[REG_R8],
		&amd64_registers[REG_R9],
	};

	for (size_t r = 0; r != ARRAY_SIZE(common_caller_saves); ++r)
		rbitset_set(default_caller_saves, common_caller_saves[r]);
	for (size_t r = 0; r != ARRAY_SIZE(common_callee_saves); ++r)
		rbitset_set(default_callee_saves, common_callee_saves[r]);

	/* RSI/RDI are caller save under SysV but callee save under x64 */
	if (amd64_use_x64_abi) {
		rbitset_set(default_callee_saves, REG_RSI);
		rbitset_set(default_callee_saves, REG_RDI);
	} else {
		rbitset_set(default_caller_saves, REG_RSI);
		rbitset_set(default_caller_saves, REG_RDI);
	}

	/* x64 passes the first parameters in RCX/RDX/R8/R9, i.e. it skips the
	 * first two SysV parameter registers and uses two fewer of them */
	param_regs   = amd64_use_x64_abi ? &param_regs_list[2] : param_regs_list;
	n_param_regs = ARRAY_SIZE(param_regs_list) - (amd64_use_x64_abi ? 2 : 0);

	/* x64 uses only XMM0-XMM3 for float parameters */
	n_float_param_regs = amd64_use_x64_abi ? 4 : ARRAY_SIZE(float_param_regs);
}