Code example #1
File: entity.c Project: qznc/libfirm
ir_node *get_atomic_ent_value(const ir_entity *entity)
{
	ir_initializer_t *initializer = get_entity_initializer(entity);

	assert(is_atomic_entity(entity));
	if (initializer == NULL) {
		ir_type *type = get_entity_type(entity);
		return new_r_Unknown(get_const_code_irg(), get_type_mode(type));
	}

	switch (get_initializer_kind(initializer)) {
	case IR_INITIALIZER_NULL: {
		ir_type *type = get_entity_type(entity);
		ir_mode *mode = get_type_mode(type);
		return new_r_Const(get_const_code_irg(), get_mode_null(mode));
	}
	case IR_INITIALIZER_TARVAL: {
		ir_tarval *tv = get_initializer_tarval_value(initializer);
		return new_r_Const(get_const_code_irg(), tv);
	}
	case IR_INITIALIZER_CONST:
		return get_initializer_const_value(initializer);
	case IR_INITIALIZER_COMPOUND:
		panic("compound initializer in atomic entity not allowed (%+F)", entity);
	}

	panic("invalid initializer kind (%+F)", entity);
}
Code example #2
File: lower_softfloat.c Project: lamproae/libfirm
static ir_type *lower_type_if_needed(ir_type *tp)
{
	bool need_lower = false;

	size_t const n_params = get_method_n_params(tp);
	for (size_t p = 0; p < n_params; ++p) {
		ir_type *ptp   = get_method_param_type(tp, p);
		ir_mode *pmode = get_type_mode(ptp);
		if (pmode && mode_is_float(pmode)) {
			need_lower = true;
			break;
		}
	}

	size_t const n_res = get_method_n_ress(tp);
	for (size_t i = 0; i < n_res; ++i) {
		ir_type *rtp   = get_method_res_type(tp, i);
		ir_mode *rmode = get_type_mode(rtp);
		if (rmode && mode_is_float(rmode)) {
			need_lower = true;
			break;
		}
	}

	if (need_lower) {
		return lower_method_type(tp);
	} else {
		return tp;
	}
}
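The pmode NULL checks above matter: get_type_mode() yields a mode only for types that actually have one (primitive and pointer types) and returns NULL otherwise. A minimal sketch of this recurring guard, with the comments being my reading of the surrounding examples:

	ir_mode *mode = get_type_mode(type);
	if (mode == NULL) {
		/* no mode: a compound/aggregate type, not an atomic value */
	} else if (mode_is_float(mode)) {
		/* an atomic floating-point value that may need lowering */
	}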
Code example #3
File: entity.c Project: qznc/libfirm
void set_atomic_ent_value(ir_entity *entity, ir_node *val)
{
	assert(is_atomic_entity(entity));
	assert(is_Dummy(val) || get_irn_mode(val) == get_type_mode(entity->type));
	ir_initializer_t *initializer = create_initializer_const(val);
	entity->initializer = initializer;
}
Code example #4
File: lower_builtins.c Project: MatzeB/libfirm
static void replace_with_call(ir_node *node)
{
	widen_builtin(node);

	ir_type        *const mtp      = get_Builtin_type(node);
	ir_builtin_kind const kind     = get_Builtin_kind(node);
	char     const *const name     = get_builtin_name(kind);
	ir_type        *const arg1     = get_method_param_type(mtp, 0);
	char     const *const machmode = get_gcc_machmode(arg1);
	ident          *const id       = new_id_fmt("__%s%s2", name, machmode);
	ir_entity      *const entity
		= create_compilerlib_entity(get_id_str(id), mtp);

	dbg_info *const dbgi      = get_irn_dbg_info(node);
	ir_node  *const block     = get_nodes_block(node);
	ir_node  *const mem       = get_Builtin_mem(node);
	ir_graph *const irg       = get_irn_irg(node);
	ir_node  *const callee    = new_r_Address(irg, entity);
	int       const n_params  = get_Builtin_n_params(node);
	ir_node **const params    = get_Builtin_param_arr(node);
	ir_node  *const call      = new_rd_Call(dbgi, block, mem, callee, n_params, params, mtp);
	ir_node  *const call_mem  = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node  *const call_ress = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_type  *const res_type  = get_method_res_type(mtp, 0);
	ir_mode  *const res_mode  = get_type_mode(res_type);
	ir_node  *const call_res  = new_r_Proj(call_ress, res_mode, 0);

	ir_node *const in[] = {
		[pn_Builtin_M]       = call_mem,
		[pn_Builtin_max + 1] = call_res,
	};
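For orientation: the ident built above follows the libgcc naming scheme for builtin replacements. Assuming get_builtin_name() returns "popcount" and get_gcc_machmode() returns "si" for a 32-bit argument, the resulting call target would be __popcountsi2; the concrete name is an illustration, not taken from the snippet itself.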
Code example #5
static bool check_type_mode(const ir_type *tp)
{
	bool fine = true;
	if (get_type_mode(tp) == NULL) {
		report_error("type %+F has no mode", tp);
		fine = false;
	}
	return fine;
}
Code example #6
File: lower_softfloat.c Project: lamproae/libfirm
/**
 * @return The lowered method type.
 */
static ir_type *lower_method_type(ir_type *mtp)
{
	ir_type *res = pmap_get(ir_type, lowered_type, mtp);
	if (res != NULL)
		return res;

	size_t const n_param     = get_method_n_params(mtp);
	size_t const n_res       = get_method_n_ress(mtp);
	bool   const is_variadic = is_method_variadic(mtp);
	res = new_type_method(n_param, n_res, is_variadic);

	/* set param types and result types */
	for (size_t i = 0; i < n_param; ++i) {
		ir_type *ptp   = get_method_param_type(mtp, i);
		ir_mode *pmode = get_type_mode(ptp);

		if (pmode != NULL && mode_is_float(pmode)) {
			ptp = lower_type(ptp);
		}

		set_method_param_type(res, i, ptp);
	}
	for (size_t i = 0; i < n_res; ++i) {
		ir_type *rtp   = get_method_res_type(mtp, i);
		ir_mode *rmode = get_type_mode(rtp);

		if (rmode != NULL && mode_is_float(rmode)) {
			rtp = lower_type(rtp);
		}

		set_method_res_type(res, i, rtp);
	}

	copy_method_properties(res, mtp);

	set_higher_type(res, mtp);

	pmap_insert(lowered_type, mtp, res);
	return res;
}
Code example #7
File: lower_softfloat.c Project: lamproae/libfirm
static ir_node *make_softfloat_call(ir_node *const n, char const *const name,
                                    size_t const arity,
                                    ir_node *const *const in)
{
	dbg_info *const dbgi     = get_irn_dbg_info(n);
	ir_node  *const block    = get_nodes_block(n);
	ir_graph *const irg      = get_irn_irg(n);
	ir_node  *const nomem    = get_irg_no_mem(irg);
	ir_node  *const callee   = create_softfloat_address(n, name);
	ir_type  *const type     = get_softfloat_type(n);
	ir_mode  *const res_mode = get_type_mode(get_method_res_type(type, 0));
	ir_node  *const call     = new_rd_Call(dbgi, block, nomem, callee, arity,
	                                       in, type);
	ir_node  *const results  = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node  *const result   = new_r_Proj(results, res_mode, 0);
	return result;
}
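The two Proj nodes at the end follow the usual libfirm convention: a Call produces a tuple, the Proj with pn_Call_T_result selects the sub-tuple of return values, and a second Proj with index 0 extracts the first (here the only) result in its result mode.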
Code example #8
File: constfold.c Project: PeterReid/cparser-to-rust
void init_constfold(void)
{
	tarval_set_wrap_on_overflow(true);

	const backend_params *be_params = be_get_backend_param();

	/* initialize modes for arithmetic types */
	memset(atomic_modes, 0, sizeof(atomic_modes));
	const ir_type *const type_ld = be_params->type_long_double;
	if (type_ld != NULL)
		atomic_modes[ATOMIC_TYPE_LONG_DOUBLE] = get_type_mode(type_ld);
	mode_float_arithmetic = be_params->mode_float_arithmetic;

	for (int i = 0; i <= ATOMIC_TYPE_LAST; ++i) {
		if (atomic_modes[i] != NULL)
			continue;
		atomic_modes[i] = init_atomic_ir_mode((atomic_type_kind_t) i);
	}
}
Code example #9
File: bevarargs.c Project: MatzeB/libfirm
void be_default_lower_va_arg(ir_node *const node, bool const compound_is_ptr,
                             unsigned const stack_param_align)
{
	ir_node  *block = get_nodes_block(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_graph *irg   = get_irn_irg(node);

	ir_type       *aptype   = get_method_res_type(get_Builtin_type(node), 0);
	ir_node *const ap       = get_irn_n(node, 1);
	ir_node *const node_mem = get_Builtin_mem(node);

	ir_mode *apmode = get_type_mode(aptype);
	ir_node *res;
	ir_node *new_mem;
	if (apmode) {
		goto load;
	} else if (compound_is_ptr) {
		apmode = mode_P;
		aptype = get_type_for_mode(apmode);
load:;
		ir_node *const load = new_rd_Load(dbgi, block, node_mem, ap, apmode, aptype, cons_none);
		res     = new_r_Proj(load, apmode, pn_Load_res);
		new_mem = new_r_Proj(load, mode_M, pn_Load_M);
	} else {
		/* aptype has no associated mode, so it is represented as a pointer. */
		res     = ap;
		new_mem = node_mem;
	}

	unsigned const round_up    = round_up2(get_type_size(aptype),
	                                       stack_param_align);
	ir_mode *const offset_mode = get_reference_offset_mode(mode_P);
	ir_node *const offset      = new_r_Const_long(irg, offset_mode, round_up);
	ir_node *const new_ap      = new_rd_Add(dbgi, block, ap, offset);

	ir_node *const in[] = { new_mem, res, new_ap };
	turn_into_tuple(node, ARRAY_SIZE(in), in);
}
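The round_up2() call keeps the va_list pointer slot-aligned. As a worked illustration (the concrete value of stack_param_align is target-specific and assumed here): with stack_param_align = 4, a 1-byte argument type still advances the pointer by a full slot, since round_up2(1, 4) == 4.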
Code example #10
File: lower_softfloat.c Project: lamproae/libfirm
/**
 * @return An Address representing the function that replaces the given node.
 */
static ir_node *create_softfloat_address(const ir_node *n, const char *name)
{
	ir_type *const method = get_softfloat_type(n);

	/* Parameter types. */
	char const *first_param  = "";
	char const *second_param = "";
	unsigned    float_types  = 0;
	unsigned    double_types = 0;
	switch (get_method_n_params(method)) {
	case 2: {
		ir_type *const param_type = get_method_param_type(method, 1);
		ir_mode *const mode       = get_type_mode(param_type);
		if (mode == mode_F) {
			second_param = "sf";
			float_types++;
		} else if (mode == mode_D) {
			second_param = "df";
			double_types++;
		} else if (mode == mode_Iu || mode == mode_Is) {
			second_param = "si";
		} else if (mode == mode_Lu || mode == mode_Ls) {
			second_param = "di";
		}
	}
		/* FALLTHROUGH */
	case 1: {
		ir_type *const param_type = get_method_param_type(method, 0);
		ir_mode *const mode       = get_type_mode(param_type);
		if (mode == mode_F) {
			first_param = float_types > 0 ? "" : "sf";
			float_types++;
		} else if (mode == mode_D) {
			first_param = double_types > 0 ? "" : "df";
			double_types++;
		} else if (mode == mode_Iu || mode == mode_Is) {
			first_param = "si";
		} else if (mode == mode_Lu || mode == mode_Ls) {
			first_param = "di";
		}
		break;
	}

	default:
		break;
	}

	/* Result type. */
	char     const *result = "";
	ir_mode *const  mode   = is_Div(n) ? get_Div_resmode(n) : get_irn_mode(n);
	if (mode == mode_F) {
		result = float_types > 0 ? "" : "sf";
		float_types++;
	} else if (mode == mode_D) {
		result = double_types > 0 ? "" : "df";
		double_types++;
	} else if (mode == mode_Iu || mode == mode_Hu || mode == mode_Bu ||
	           mode == mode_Is || mode == mode_Hs || mode == mode_Bs)
		result = "si";
	else if (mode == mode_Lu || mode == mode_Ls)
		result = "di";

	assert(float_types <= 3);
	assert(double_types <= 3);

	ident *const id = float_types + double_types > 1 ?
		new_id_fmt("__%s%s%s%s%u", name, first_param, second_param, result, float_types + double_types) :
		new_id_fmt("__%s%s%s%s",   name, first_param, second_param, result);

	ir_graph  *const irg = get_irn_irg(n);
	ir_entity *const ent = create_compilerlib_entity(id, method);
	return new_r_Address(irg, ent);
}
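A worked illustration of the name mangling above, assuming the standard libgcc softfloat names: for a double addition with name = "add", the second parameter sets second_param = "df", the first parameter and the result are already-counted doubles and contribute "", and float_types + double_types ends up as 3, so the ident becomes __adddf3. A conversion involving only one float type gets no count suffix, e.g. __fixdfsi for name = "fix" with a double parameter and a signed int result (both names are illustrative assumptions).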
Code example #11
File: ia32_cconv.c Project: dianpeng/libfirm
x86_cconv_t *ia32_decide_calling_convention(ir_type *function_type,
                                            ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	(void)mtp;
	/* TODO: do something with cc_reg_param/cc_this_call */

	unsigned *caller_saves = rbitset_malloc(N_IA32_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_IA32_REGISTERS);
	rbitset_copy(caller_saves, default_caller_saves, N_IA32_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_IA32_REGISTERS);

	/* determine how parameters are passed */
	unsigned            n_params           = get_method_n_params(function_type);
	unsigned            param_regnum       = 0;
	unsigned            float_param_regnum = 0;
	reg_or_stackslot_t *params             = XMALLOCNZ(reg_or_stackslot_t,
	                                                   n_params);

	unsigned n_param_regs       = ARRAY_SIZE(default_param_regs);
	unsigned n_float_param_regs = ARRAY_SIZE(float_param_regs);
	unsigned stack_offset       = 0;
	for (unsigned i = 0; i < n_params; ++i) {
		ir_type            *param_type = get_method_param_type(function_type, i);
		reg_or_stackslot_t *param      = &params[i];
		if (is_aggregate_type(param_type)) {
			param->type   = param_type;
			param->offset = stack_offset;
			stack_offset += get_type_size_bytes(param_type);
			goto align_stack;
		}

		ir_mode *mode = get_type_mode(param_type);
		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs) {
			param->reg = float_param_regs[float_param_regnum++];
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = default_param_regs[param_regnum++];
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			stack_offset += get_type_size_bytes(param_type);
align_stack:;
			/* increase offset by at least IA32_REGISTER_SIZE bytes so
			 * everything is aligned */
			unsigned misalign = stack_offset % IA32_REGISTER_SIZE;
			if (misalign > 0)
				stack_offset += IA32_REGISTER_SIZE - misalign;
		}
	}

	unsigned n_param_regs_used = param_regnum + float_param_regnum;

	/* determine how results are passed */
	unsigned            n_results           = get_method_n_ress(function_type);
	unsigned            n_reg_results       = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned            res_regnum          = 0;
	unsigned            res_float_regnum    = 0;
	unsigned            n_result_regs       = ARRAY_SIZE(result_regs);
	unsigned            n_float_result_regs = ARRAY_SIZE(float_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating points results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	calling_convention cc = get_method_calling_convention(function_type);

	x86_cconv_t *cconv    = XMALLOCZ(x86_cconv_t);
	cconv->sp_delta       = (cc & cc_compound_ret) && !(cc & cc_reg_param)
	                        ? IA32_REGISTER_SIZE : 0;
	cconv->parameters     = params;
	cconv->n_parameters   = n_params;
	cconv->callframe_size = stack_offset;
	cconv->n_param_regs   = n_param_regs_used;
	cconv->n_xmm_regs     = float_param_regnum;
	cconv->results        = results;
	cconv->omit_fp        = omit_fp;
	cconv->caller_saves   = caller_saves;
	cconv->callee_saves   = callee_saves;
	cconv->n_reg_results  = n_reg_results;

	if (irg != NULL) {
		be_irg_t       *birg      = be_birg_from_irg(irg);
		size_t          n_ignores = ARRAY_SIZE(ignore_regs);
		struct obstack *obst      = &birg->obst;

		birg->allocatable_regs = rbitset_obstack_alloc(obst, N_IA32_REGISTERS);
		rbitset_set_all(birg->allocatable_regs, N_IA32_REGISTERS);
		for (size_t r = 0; r < n_ignores; ++r) {
			rbitset_clear(birg->allocatable_regs, ignore_regs[r]);
		}
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_EBP);
	}

	return cconv;
}
Code example #12
static bool check_initializer(const ir_initializer_t *initializer,
                              const ir_type *type,
                              const ir_entity *context)
{
	bool fine = true;
	switch (get_initializer_kind(initializer)) {
	case IR_INITIALIZER_NULL:
		return fine;
	case IR_INITIALIZER_TARVAL: {
		ir_tarval *tv = get_initializer_tarval_value(initializer);
		if (get_type_mode(type) != get_tarval_mode(tv)) {
			report_error("tarval initializer for entity %+F has wrong mode: %+F vs %+F", context, get_type_mode(type), get_tarval_mode(tv));
			fine = false;
		}
		return fine;
	}
	case IR_INITIALIZER_CONST: {
		ir_node *value = get_initializer_const_value(initializer);
		if (get_type_mode(type) != get_irn_mode(value)) {
			report_error("const initializer for entity %+F has wrong mode: %+F vs %+F", context, get_type_mode(type), get_irn_mode(value));
			fine = false;
		}
		if (!constant_on_correct_irg(value)) {
			report_error("initializer const value %+F for entity %+F not on const-code irg", value, context);
			fine = false;
		}
		return fine;
	}
	case IR_INITIALIZER_COMPOUND: {
		size_t n_entries = get_initializer_compound_n_entries(initializer);
		if (is_Array_type(type)) {
			ir_type *element_type = get_array_element_type(type);
			/* TODO: check array bounds? */
			for (size_t i = 0; i < n_entries; ++i) {
				const ir_initializer_t *sub_initializer
					= get_initializer_compound_value(initializer, i);
				check_initializer(sub_initializer, element_type, context);
			}
		} else if (is_compound_type(type)) {
			size_t n_members = get_compound_n_members(type);
			if (n_entries > n_members) {
				report_error("too many values in compound initializer of %+F",
				             context);
				fine = false;
			}
			for (size_t i = 0; i < n_entries; ++i) {
				if (i >= n_members)
					break;
				ir_entity *member      = get_compound_member(type, i);
				ir_type   *member_type = get_entity_type(member);
				const ir_initializer_t *sub_initializer
					= get_initializer_compound_value(initializer, i);
				check_initializer(sub_initializer, member_type, context);
			}
		} else {
			report_error("compound initiailizer for non-array/compound type in entity %+F",
			             context);
			fine = false;
		}
		return fine;
	}
	}
	report_error("invalid initializer for entity %+F", context);
	return false;
}
Code example #13
File: arm_cconv.c Project: MatzeB/libfirm
calling_convention_t *arm_decide_calling_convention(const ir_graph *irg,
                                                    ir_type *function_type)
{
	/* determine how parameters are passed */
	unsigned            stack_offset = 0;
	size_t const        n_param_regs = ARRAY_SIZE(param_regs);
	size_t const        n_params     = get_method_n_params(function_type);
	size_t              regnum       = 0;
	reg_or_stackslot_t *params       = XMALLOCNZ(reg_or_stackslot_t, n_params);

	for (size_t i = 0; i < n_params; ++i) {
		ir_type            *param_type = get_method_param_type(function_type,i);
		ir_mode            *mode       = get_type_mode(param_type);
		int                 bits       = get_mode_size_bits(mode);
		reg_or_stackslot_t *param      = &params[i];
		param->type = param_type;

		/* doubleword modes need to be passed in even registers */
		if (param_type->flags & tf_lowered_dw) {
			if (regnum < n_param_regs) {
				if ((regnum & 1) != 0)
					++regnum;
			} else {
				unsigned misalign = stack_offset % 8;
				if (misalign > 0)
					stack_offset += 8 - misalign;
			}
		}

		if (regnum < n_param_regs) {
			param->reg0 = param_regs[regnum++];
		} else {
			param->offset = stack_offset;
			/* increase offset by at least 4 bytes so everything is aligned */
			stack_offset += MAX(bits / 8, 4);
			continue;
		}

		/* we might need a 2nd 32bit component (for 64bit or double values) */
		if (bits > 32) {
			if (bits > 64)
				panic("only 32 and 64bit modes supported");

			if (regnum < n_param_regs) {
				const arch_register_t *reg = param_regs[regnum++];
				param->reg1 = reg;
			} else {
				ir_mode *pmode = param_regs[0]->cls->mode;
				ir_type *type  = get_type_for_mode(pmode);
				param->type    = type;
				param->offset  = stack_offset;
				assert(get_mode_size_bits(pmode) == 32);
				stack_offset += 4;
			}
		}
	}
	unsigned const n_param_regs_used = regnum;

	size_t const        n_result_regs       = ARRAY_SIZE(result_regs);
	size_t const        n_float_result_regs = ARRAY_SIZE(float_result_regs);
	size_t              n_results    = get_method_n_ress(function_type);
	size_t              float_regnum = 0;
	reg_or_stackslot_t *results      = XMALLOCNZ(reg_or_stackslot_t, n_results);
	regnum = 0;
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		if (mode_is_float(result_mode)) {
			if (float_regnum >= n_float_result_regs) {
				panic("too many float results");
			} else {
				const arch_register_t *reg = float_result_regs[float_regnum++];
				result->reg0 = reg;
			}
		} else {
			if (get_mode_size_bits(result_mode) > 32) {
				panic("results with more than 32bits not supported yet");
			}

			if (regnum >= n_result_regs) {
				panic("too many results");
			} else {
				const arch_register_t *reg = result_regs[regnum++];
				result->reg0 = reg;
			}
		}
	}

	calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
	cconv->parameters       = params;
	cconv->n_parameters     = n_params;
	cconv->param_stack_size = stack_offset;
	cconv->n_param_regs     = n_param_regs_used;
	cconv->results          = results;

	/* setup allocatable registers */
	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);

		assert(birg->allocatable_regs == NULL);
		birg->allocatable_regs = be_cconv_alloc_all_regs(&birg->obst, N_ARM_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs, ARRAY_SIZE(ignore_regs));
		arm_get_irg_data(irg)->omit_fp = true;
	}

	return cconv;
}
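The even-register rule in the parameter loop mirrors the ARM AAPCS, which requires doubleword arguments to start in an even-numbered core register. As an illustration with the standard ARM convention assumed: for f(int a, long long b), a lands in the first register, regnum is bumped from 1 to 2, and b occupies the third and fourth registers (r2/r3) instead of straddling r1/r2.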
Code example #14
File: lower_hl.c Project: qznc/libfirm
/**
 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
 */
static void lower_sel(ir_node *sel)
{
	ir_graph  *irg   = get_irn_irg(sel);
	ir_entity *ent   = get_Sel_entity(sel);
	ir_type   *owner = get_entity_owner(ent);
	dbg_info  *dbg   = get_irn_dbg_info(sel);
	ir_mode   *mode  = get_irn_mode(sel);
	ir_node   *bl    = get_nodes_block(sel);
	ir_node   *newn;

	/* we can only replace Sels when the layout of the owner type is decided. */
	if (get_type_state(owner) != layout_fixed)
		return;

	if (0 < get_Sel_n_indexs(sel)) {
		/* an Array access */
		ir_type *basetyp = get_entity_type(ent);
		ir_mode *basemode;
		ir_node *index;
		if (is_Primitive_type(basetyp))
			basemode = get_type_mode(basetyp);
		else
			basemode = mode_P_data;

		assert(basemode && "no mode for lowering Sel");
		assert((get_mode_size_bits(basemode) % 8 == 0) && "cannot deal with unorthodox modes");
		index = get_Sel_index(sel, 0);

		if (is_Array_type(owner)) {
			ir_type *arr_ty = owner;
			size_t   dims   = get_array_n_dimensions(arr_ty);
			size_t  *map    = ALLOCAN(size_t, dims);
			ir_mode *mode_Int = get_reference_mode_signed_eq(mode);
			ir_tarval *tv;
			ir_node *last_size;
			size_t   i;

			assert(dims == (size_t)get_Sel_n_indexs(sel)
				&& "array dimension must match number of indices of Sel node");

			for (i = 0; i < dims; i++) {
				size_t order = get_array_order(arr_ty, i);

				assert(order < dims &&
					"order of a dimension must be smaller than the arrays dim");
				map[order] = i;
			}
			newn = get_Sel_ptr(sel);

			/* Size of the array element */
			tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
			last_size = new_rd_Const(dbg, irg, tv);

			/*
			 * We compute the offset part of dimension d_i recursively
			 * with the offset part of dimension d_{i-1}
			 *
			 *     off_0 = sizeof(array_element_type);
			 *     off_i = (u_i - l_i) * off_{i-1}  ; i >= 1
			 *
			 * whereas u_i is the upper bound of the current dimension
			 * and l_i the lower bound of the current dimension.
			 */
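			/*
			 * Illustrative example (not part of the original source): for a
			 * 3x4 array of 4-byte elements with lower bounds 0, the strides
			 * are off_0 = 4 and off_1 = (4 - 0) * 4 = 16, so element [i][j]
			 * ends up at base + i*16 + j*4.
			 */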
			for (i = dims; i > 0;) {
				size_t dim = map[--i];
				ir_node *lb, *ub, *elms, *n, *ind;

				elms = NULL;
				lb = get_array_lower_bound(arr_ty, dim);
				ub = get_array_upper_bound(arr_ty, dim);

				if (! is_Unknown(lb))
					lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
				else
					lb = NULL;

				if (! is_Unknown(ub))
					ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
				else
					ub = NULL;

				/*
				 * If the array has more than one dimension, lower and upper
				 * bounds have to be set in the non-last dimension.
				 */
				if (i > 0) {
					assert(lb != NULL && "lower bound has to be set in multi-dim array");
					assert(ub != NULL && "upper bound has to be set in multi-dim array");

					/* Elements in one Dimension */
					elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
				}

				ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);

				/*
				 * Normalize the index if a lower bound is set; otherwise
				 * assume a lower bound of 0.
				 */
				if (lb != NULL)
					ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);

				n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);

				/*
				 * see comment above.
				 */
				if (i > 0)
					last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);

				newn = new_rd_Add(dbg, bl, newn, n, mode);
			}
		} else {
			/* no array type */
			ir_mode   *idx_mode = get_irn_mode(index);
			ir_tarval *tv       = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);

			newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
				new_rd_Mul(dbg, bl, index,
				new_r_Const(irg, tv),
				idx_mode),
				mode);
		}
	} else if (is_Method_type(get_entity_type(ent)) && is_Class_type(owner)) {
		/* We need an additional load when accessing methods from a dispatch
		 * table.
		 * Matze TODO: Is this really still used? At least liboo does its own
		 * lowering of Method-Sels...
		 */
		ir_mode   *ent_mode = get_type_mode(get_entity_type(ent));
		int        offset   = get_entity_offset(ent);
		ir_mode   *mode_Int = get_reference_mode_signed_eq(mode);
		ir_tarval *tv       = new_tarval_from_long(offset, mode_Int);
		ir_node   *cnst     = new_rd_Const(dbg, irg, tv);
		ir_node   *add      = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
		ir_node   *mem      = get_Sel_mem(sel);
		newn = new_rd_Load(dbg, bl, mem, add, ent_mode, cons_none);
		newn = new_r_Proj(newn, ent_mode, pn_Load_res);
	} else {
		int offset = get_entity_offset(ent);

		/* replace Sel by add(obj, const(ent.offset)) */
		newn = get_Sel_ptr(sel);
		if (offset != 0) {
			ir_mode   *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_tarval *tv        = new_tarval_from_long(offset, mode_UInt);
			ir_node   *cnst      = new_r_Const(irg, tv);
			newn = new_rd_Add(dbg, bl, newn, cnst, mode);
		}
	}

	/* run the hooks */
	hook_lower(sel);

	exchange(sel, newn);
}
Code example #15
File: amd64_cconv.c Project: mj3-16/libfirm
x86_cconv_t *amd64_decide_calling_convention(ir_type *function_type,
                                             ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
		amd64_get_irg_data(irg)->omit_fp = omit_fp;
	}

	unsigned *caller_saves = rbitset_malloc(N_AMD64_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_AMD64_REGISTERS);
	rbitset_copy(caller_saves, default_caller_saves, N_AMD64_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_AMD64_REGISTERS);

	/* determine how parameters are passed */
	size_t              n_params           = get_method_n_params(function_type);
	size_t              param_regnum       = 0;
	size_t              float_param_regnum = 0;
	reg_or_stackslot_t *params             = XMALLOCNZ(reg_or_stackslot_t,
	                                                   n_params);
	/* x64 always reserves space to spill the first 4 arguments to have it
	 * easy in case of variadic functions. */
	unsigned stack_offset = amd64_use_x64_abi ? 32 : 0;
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *param_type = get_method_param_type(function_type,i);
		if (is_compound_type(param_type))
			panic("compound arguments NIY");

		ir_mode *mode = get_type_mode(param_type);
		int      bits = get_mode_size_bits(mode);
		reg_or_stackslot_t *param = &params[i];

		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs
		    && mode != x86_mode_E) {
			param->reg = float_param_regs[float_param_regnum++];
			if (amd64_use_x64_abi) {
				++param_regnum;
			}
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = param_regs[param_regnum++];
			if (amd64_use_x64_abi) {
				++float_param_regnum;
			}
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			/* increase offset by at least AMD64_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += round_up2(bits / 8, AMD64_REGISTER_SIZE);
		}
	}

	/* If the function is variadic, we add all unused parameter
	 * passing registers to the end of the params array, first GP,
	 * then XMM. */
	if (irg && is_method_variadic(function_type)) {
		if (amd64_use_x64_abi) {
			panic("Variadic functions on Windows ABI not supported");
		}

		int params_remaining = (n_param_regs - param_regnum) +
			(n_float_param_regs - float_param_regnum);
		params = XREALLOC(params, reg_or_stackslot_t, n_params + params_remaining);
		size_t i = n_params;

		for (; param_regnum < n_param_regs; param_regnum++, i++) {
			params[i].reg = param_regs[param_regnum];
		}

		for (; float_param_regnum < n_float_param_regs; float_param_regnum++, i++) {
			params[i].reg = float_param_regs[float_param_regnum];
		}
	}

	unsigned n_param_regs_used
		= amd64_use_x64_abi ? param_regnum : param_regnum + float_param_regnum;

	/* determine how results are passed */
	size_t              n_results           = get_method_n_ress(function_type);
	unsigned            n_reg_results       = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned            res_regnum          = 0;
	unsigned            res_float_regnum    = 0;
	unsigned            res_x87_regnum      = 0;
	size_t              n_result_regs       = ARRAY_SIZE(result_regs);
	size_t              n_float_result_regs = ARRAY_SIZE(float_result_regs);
	size_t              n_x87_result_regs   = ARRAY_SIZE(x87_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (result_mode == x86_mode_E) {
			if (res_x87_regnum >= n_x87_result_regs)
				panic("too manu x87 floating point results");
			reg = x87_result_regs[res_x87_regnum++];
		} else if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating points results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	x86_cconv_t *cconv     = XMALLOCZ(x86_cconv_t);
	cconv->parameters      = params;
	cconv->n_parameters    = n_params;
	cconv->param_stacksize = stack_offset;
	cconv->n_param_regs    = n_param_regs_used;
	cconv->n_xmm_regs      = float_param_regnum;
	cconv->results         = results;
	cconv->omit_fp         = omit_fp;
	cconv->caller_saves    = caller_saves;
	cconv->callee_saves    = callee_saves;
	cconv->n_reg_results   = n_reg_results;

	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);

		birg->allocatable_regs = be_cconv_alloc_all_regs(&birg->obst, N_AMD64_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs, ARRAY_SIZE(ignore_regs));
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_RBP);
	}

	return cconv;
}
Code example #16
File: lower_softfloat.c Project: lamproae/libfirm
/**
 * @return The lowered (floating point) type.
 */
static ir_type *lower_type(ir_type *tp)
{
	ir_mode *mode         = get_type_mode(tp);
	ir_mode *lowered_mode = get_lowered_mode(mode);
	return get_type_for_mode(lowered_mode);
}
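(For context: get_lowered_mode() presumably maps a float mode to an integer mode of the same size here, e.g. mode_F to mode_Iu and mode_D to mode_Lu on a 32-bit softfloat target, so the returned type is the matching integer type. This is an assumption about the surrounding pass, not shown in the snippet.)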
Code example #17
File: trverify.c Project: qznc/libfirm
int check_entity(const ir_entity *entity)
{
	bool        fine    = true;
	ir_type    *tp      = get_entity_type(entity);
	ir_linkage  linkage = get_entity_linkage(entity);

	fine &= constants_on_wrong_irg(entity);

	if (is_method_entity(entity)) {
		ir_graph *irg = get_entity_irg(entity);
		if (irg != NULL) {
			ir_entity *irg_entity = get_irg_entity(irg);
			if (irg_entity != entity) {
				report_error("entity(%+F)->irg->entity(%+F) relation invalid",
				             entity, irg_entity);
				fine = false;
			}
		}
		if (get_entity_peculiarity(entity) == peculiarity_existent) {
			ir_entity *impl = get_SymConst_entity(get_atomic_ent_value(entity));
			if (impl == NULL) {
				report_error("inherited method entity %+F must have constant pointing to existent entity.", entity);
				fine = false;
			}
		}
	}

	if (linkage & IR_LINKAGE_NO_CODEGEN) {
		if (!is_method_entity(entity)) {
			report_error("entity %+F has IR_LINKAGE_NO_CODEGEN but is not a function", entity);
			fine = false;
		} else if (get_entity_irg(entity) == NULL) {
			report_error("entity %+F has IR_LINKAGE_NO_CODEGEN but has no ir-graph anyway", entity);
			fine = false;
		}
		if (get_entity_visibility(entity) != ir_visibility_external) {
			report_error("entity %+F has IR_LINKAGE_NO_CODEGEN but is not externally visible", entity);
			fine = false;
		}
	}
	check_external_linkage(entity, IR_LINKAGE_WEAK, "WEAK");
	check_external_linkage(entity, IR_LINKAGE_GARBAGE_COLLECT,
	                       "GARBAGE_COLLECT");
	check_external_linkage(entity, IR_LINKAGE_MERGE, "MERGE");

	if (is_atomic_entity(entity) && entity->initializer != NULL) {
		ir_mode *mode = NULL;
		ir_initializer_t *initializer = entity->initializer;
		switch (initializer->kind) {
		case IR_INITIALIZER_CONST:
			mode = get_irn_mode(get_initializer_const_value(initializer));
			break;
		case IR_INITIALIZER_TARVAL:
			mode = get_tarval_mode(get_initializer_tarval_value(initializer));
			break;
		case IR_INITIALIZER_NULL:
		case IR_INITIALIZER_COMPOUND:
			break;
		}
		if (mode != NULL && mode != get_type_mode(tp)) {
			report_error("initializer of entity %+F has wrong mode.", entity);
			fine = false;
		}
	}
	return fine;
}
Code example #18
File: sparc_cconv.c Project: lamproae/libfirm
calling_convention_t *sparc_decide_calling_convention(ir_type *function_type,
                                                      ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		/* our current vaarg handling needs the standard space to store the
		 * args 0-5 in it */
		if (is_method_variadic(function_type))
			omit_fp = false;
		/* The pointer to the aggregate return value belongs to the 92 magic bytes.
		 * Thus, if the called function increases the stack size,
		 * it must copy the value to the appropriate location.
		 * This is not implemented yet, so we forbid omitting the frame pointer.
		 */
		if (get_method_calling_convention(function_type) & cc_compound_ret)
			omit_fp = false;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
		sparc_get_irg_data(irg)->omit_fp = omit_fp;
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	unsigned *caller_saves = rbitset_malloc(N_SPARC_REGISTERS);
	if (mtp & mtp_property_returns_twice) {
		rbitset_copy(caller_saves, default_returns_twice_saves,
		             N_SPARC_REGISTERS);
	} else {
		rbitset_copy(caller_saves, default_caller_saves, N_SPARC_REGISTERS);
	}

	/* determine how parameters are passed */
	int                 n_params = get_method_n_params(function_type);
	int                 regnum   = 0;
	reg_or_stackslot_t *params   = XMALLOCNZ(reg_or_stackslot_t, n_params);

	int      n_param_regs = ARRAY_SIZE(param_regs);
	unsigned stack_offset = !omit_fp ? SPARC_MIN_STACKSIZE : 0;
	for (int i = 0; i < n_params; ++i) {
		ir_type            *param_type = get_method_param_type(function_type,i);
		ir_mode            *mode;
		int                 bits;
		reg_or_stackslot_t *param;

		if (is_compound_type(param_type))
			panic("compound arguments not supported yet");

		mode  = get_type_mode(param_type);
		bits  = get_mode_size_bits(mode);
		param = &params[i];

		if (i == 0 &&
		    (get_method_calling_convention(function_type) & cc_compound_ret)) {
			assert(mode_is_reference(mode) && bits == 32);
			/* special case, we have reserved space for this on the between
			 * type */
			param->type   = param_type;
			param->offset = SPARC_AGGREGATE_RETURN_OFFSET;
			param->already_stored = true;
			continue;
		}

		if (regnum < n_param_regs) {
			param->offset = SPARC_PARAMS_SPILL_OFFSET
			                + regnum * SPARC_REGISTER_SIZE;
			param->type   = param_type;
			arch_register_t const *reg = param_regs[regnum++];
			if (irg == NULL || omit_fp)
				reg = map_i_to_o_reg(reg);
			param->reg0 = reg;
			param->req0 = reg->single_req;
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			param->already_stored = true;
			/* increase offset by at least SPARC_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += MAX(bits / 8, SPARC_REGISTER_SIZE);
			continue;
		}

		/* we might need a 2nd 32bit component (for 64bit or double values) */
		if (bits > 32) {
			if (bits > 64)
				panic("only 32 and 64bit modes supported");

			if (regnum < n_param_regs) {
				param->offset = SPARC_PARAMS_SPILL_OFFSET
				                + regnum * SPARC_REGISTER_SIZE;
				arch_register_t const *reg = param_regs[regnum++];
				if (irg == NULL || omit_fp)
					reg = map_i_to_o_reg(reg);
				param->reg1 = reg;
				param->req1 = reg->single_req;
			} else {
				ir_mode *regmode = param_regs[0]->cls->mode;
				ir_type *type    = get_type_for_mode(regmode);
				param->type      = type;
				param->offset    = stack_offset;
				assert(get_mode_size_bits(regmode) == 32);
				stack_offset += SPARC_REGISTER_SIZE;
			}
		}
	}
	unsigned n_param_regs_used = regnum;

	/* determine how results are passed */
	int                 n_results           = get_method_n_ress(function_type);
	unsigned            float_regnum        = 0;
	unsigned            n_reg_results       = 0;
	unsigned            n_float_result_regs = ARRAY_SIZE(float_result_regs);
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	regnum        = 0;
	for (int i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		if (mode_is_float(result_mode)) {
			unsigned n_regs   = determine_n_float_regs(result_mode);
			unsigned next_reg = round_up2(float_regnum, n_regs);

			if (next_reg >= n_float_result_regs) {
				panic("too many float results");
			} else {
				const arch_register_t *reg = float_result_regs[next_reg];
				rbitset_clear(caller_saves, reg->global_index);
				if (n_regs == 1) {
					result->req0 = reg->single_req;
				} else if (n_regs == 2) {
					result->req0 = &float_result_reqs_double[next_reg];
					rbitset_clear(caller_saves, reg->global_index+1);
				} else if (n_regs == 4) {
					result->req0 = &float_result_reqs_quad[next_reg];
					rbitset_clear(caller_saves, reg->global_index+1);
					rbitset_clear(caller_saves, reg->global_index+2);
					rbitset_clear(caller_saves, reg->global_index+3);
				} else {
					panic("invalid number of registers in result");
				}
				float_regnum = next_reg + n_regs;

				++n_reg_results;
			}
		} else {
			if (get_mode_size_bits(result_mode) > 32) {
				panic("results with more than 32bits not supported yet");
			}

			if (regnum >= n_param_regs) {
				panic("too many results");
			} else {
				const arch_register_t *reg = param_regs[regnum++];
				if (irg == NULL || omit_fp)
					reg = map_i_to_o_reg(reg);
				result->req0 = reg->single_req;
				rbitset_clear(caller_saves, reg->global_index);
				++n_reg_results;
			}
		}
	}

	calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
	cconv->n_parameters     = n_params;
	cconv->parameters       = params;
	cconv->param_stack_size = stack_offset - SPARC_MIN_STACKSIZE;
	cconv->n_param_regs     = n_param_regs_used;
	cconv->results          = results;
	cconv->omit_fp          = omit_fp;
	cconv->caller_saves     = caller_saves;
	cconv->n_reg_results    = n_reg_results;

	/* setup ignore register array */
	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);

		birg->allocatable_regs = be_cconv_alloc_all_regs(&birg->obst, N_SPARC_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs, ARRAY_SIZE(ignore_regs));
	}

	return cconv;
}
Code example #19
File: amd64_cconv.c Project: davidgiven/libfirm
x86_cconv_t *amd64_decide_calling_convention(ir_type *function_type,
                                             ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	unsigned *caller_saves = rbitset_malloc(N_AMD64_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_AMD64_REGISTERS);
	if (mtp & mtp_property_returns_twice)
		panic("amd64: returns_twice calling convention NIY");
	rbitset_copy(caller_saves, default_caller_saves, N_AMD64_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_AMD64_REGISTERS);

	/* determine how parameters are passed */
	size_t              n_params           = get_method_n_params(function_type);
	size_t              param_regnum       = 0;
	size_t              float_param_regnum = 0;
	reg_or_stackslot_t *params             = XMALLOCNZ(reg_or_stackslot_t,
	                                                   n_params);
	/* x64 always reserves space to spill the first 4 arguments to have it
	 * easy in case of variadic functions. */
	unsigned stack_offset = amd64_use_x64_abi ? 32 : 0;
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *param_type = get_method_param_type(function_type,i);
		if (is_compound_type(param_type))
			panic("amd64: compound arguments NIY");

		ir_mode *mode = get_type_mode(param_type);
		int      bits = get_mode_size_bits(mode);
		reg_or_stackslot_t *param = &params[i];

		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs) {
			param->reg = float_param_regs[float_param_regnum++];
			if (amd64_use_x64_abi)
				++param_regnum;
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = param_regs[param_regnum++];
			if (amd64_use_x64_abi)
				++float_param_regnum;
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			/* increase offset by at least AMD64_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += MAX(bits / 8, AMD64_REGISTER_SIZE);
			continue;
		}

	}

	unsigned n_param_regs_used
		= amd64_use_x64_abi ? param_regnum : param_regnum + float_param_regnum;

	/* determine how results are passed */
	size_t              n_results           = get_method_n_ress(function_type);
	unsigned            n_reg_results       = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned            res_regnum          = 0;
	unsigned            res_float_regnum    = 0;
	size_t              n_result_regs       = ARRAY_SIZE(result_regs);
	size_t              n_float_result_regs = ARRAY_SIZE(float_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating points results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	x86_cconv_t *cconv    = XMALLOCZ(x86_cconv_t);
	cconv->parameters     = params;
	cconv->callframe_size = stack_offset;
	cconv->n_param_regs   = n_param_regs_used;
	cconv->n_xmm_regs     = float_param_regnum;
	cconv->results        = results;
	cconv->omit_fp        = omit_fp;
	cconv->caller_saves   = caller_saves;
	cconv->callee_saves   = callee_saves;
	cconv->n_reg_results  = n_reg_results;

	if (irg != NULL) {
		be_irg_t       *birg      = be_birg_from_irg(irg);
		size_t          n_ignores = ARRAY_SIZE(ignore_regs);
		struct obstack *obst      = &birg->obst;

		birg->allocatable_regs = rbitset_obstack_alloc(obst, N_AMD64_REGISTERS);
		rbitset_set_all(birg->allocatable_regs, N_AMD64_REGISTERS);
		for (size_t r = 0; r < n_ignores; ++r) {
			rbitset_clear(birg->allocatable_regs, ignore_regs[r]);
		}
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_RBP);
	}

	return cconv;
}