Example #1
int values_in_mode(const ir_mode *sm, const ir_mode *lm)
{
    assert(sm != NULL);
    assert(lm != NULL);
    if (sm == lm)
        return true;

    if (sm == mode_b)
        return mode_is_int(lm) || mode_is_float(lm);

    ir_mode_arithmetic larith = get_mode_arithmetic(lm);
    ir_mode_arithmetic sarith = get_mode_arithmetic(sm);
    switch (larith) {
    case irma_x86_extended_float:
    case irma_ieee754:
        if (sarith == irma_ieee754 || sarith == irma_x86_extended_float) {
            return get_mode_size_bits(sm) <= get_mode_size_bits(lm);
        } else if (sarith == irma_twos_complement) {
            unsigned int_mantissa
                = get_mode_size_bits(sm) - (mode_is_signed(sm) ? 1 : 0);
            unsigned float_mantissa = get_mode_mantissa_size(lm) + 1;
            return int_mantissa <= float_mantissa;
        }
        break;
    case irma_twos_complement:
        if (sarith == irma_twos_complement)
            return get_mode_size_bits(sm) <= get_mode_size_bits(lm);
        break;
    case irma_none:
        break;
    }
    return false;
}
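
A minimal usage sketch (not part of the original listing): assuming libFirm has been initialised so that the standard modes mode_Is (32-bit signed int), mode_F (IEEE single) and mode_D (IEEE double) exist, the mantissa check above rejects int-to-float but accepts int-to-double.

#include <assert.h>
#include <libfirm/firm.h>

static void values_in_mode_example(void)
{
    assert(!values_in_mode(mode_Is, mode_F)); /* 32-1 = 31 value bits > 23+1 = 24 mantissa bits */
    assert( values_in_mode(mode_Is, mode_D)); /* 31 <= 52+1 = 53 mantissa bits */
    assert( values_in_mode(mode_F,  mode_D)); /* widening IEEE754 conversion: 32 <= 64 bits */
}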
Example #2
int smaller_mode(const ir_mode *sm, const ir_mode *lm)
{
    assert(sm != NULL);
    assert(lm != NULL);
    if (sm == lm) return true;

    switch (get_mode_sort(sm)) {
    case irms_int_number:
        switch (get_mode_sort(lm)) {
        case irms_int_number:
            if (get_mode_arithmetic(sm) != get_mode_arithmetic(lm))
                return false;

            /* only two's complement implemented */
            assert(get_mode_arithmetic(sm) == irma_twos_complement);

            /* integers are convertible if
             * - both have the same sign and lm is at least as large
             * - lm is signed and at least one bit larger (for the sign) */
            unsigned sm_bits = get_mode_size_bits(sm);
            unsigned lm_bits = get_mode_size_bits(lm);
            if (mode_is_signed(sm)) {
                if (!mode_is_signed(lm))
                    return false;
            } else {
                if (mode_is_signed(lm))
                    return sm_bits < lm_bits;
            }
            return sm_bits <= lm_bits;

        case irms_auxiliary:
        case irms_data:
        case irms_internal_boolean:
        case irms_reference:
        case irms_float_number:
            /* int to float works if the float is large enough */
            return false;
        }
        panic("invalid mode_sort");

    case irms_float_number:
        return get_mode_arithmetic(sm) == get_mode_arithmetic(lm)
               && mode_is_float(lm)
               && get_mode_size_bits(lm) >= get_mode_size_bits(sm);

    case irms_auxiliary:
    case irms_data:
    case irms_internal_boolean:
    case irms_reference:
        /* do machines with different pointer lengths exist out there? */
        return false;
    }

    panic("invalid mode_sort");
}
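
A hedged usage sketch for the function above, assuming the standard 16- and 32-bit integer modes mode_Hs, mode_Hu and mode_Is:

static void smaller_mode_example(void)
{
    assert( smaller_mode(mode_Hs, mode_Is)); /* signed 16 bit fits into signed 32 bit */
    assert( smaller_mode(mode_Hu, mode_Is)); /* unsigned 16 bit fits, one bit is left for the sign */
    assert(!smaller_mode(mode_Hu, mode_Hs)); /* unsigned 16 bit needs at least 17 signed bits */
}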
Example #3
int is_reinterpret_cast(const ir_mode *src, const ir_mode *dst)
{
    if (src == dst)
        return true;
    if (get_mode_size_bits(src) != get_mode_size_bits(dst))
        return false;
    ir_mode_arithmetic ma = get_mode_arithmetic(src);
    if (ma != get_mode_arithmetic(dst))
        return false;

    return ma == irma_twos_complement;
}
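
A short sketch of the three outcomes, again assuming the standard modes (mode_Is/mode_Iu are 32-bit, mode_Ls is 64-bit, mode_F is IEEE single):

static void is_reinterpret_cast_example(void)
{
    assert( is_reinterpret_cast(mode_Is, mode_Iu)); /* same size, both two's complement */
    assert(!is_reinterpret_cast(mode_Is, mode_Ls)); /* 32 vs. 64 bit */
    assert(!is_reinterpret_cast(mode_Is, mode_F));  /* two's complement vs. IEEE754 arithmetic */
}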
Example #4
/**
 * Transforms a Conv into the appropriate soft float function.
 */
static bool lower_Conv(ir_node *const n)
{
	dbg_info *const dbgi    = get_irn_dbg_info(n);
	ir_node  *const block   = get_nodes_block(n);
	ir_mode  *const mode    = get_irn_mode(n);
	ir_node        *op      = get_Conv_op(n);
	ir_mode        *op_mode = get_irn_mode(op);

	char const *name;
	if (!mode_is_float(mode)) {
		if (!mode_is_float(op_mode))
			return false;
		if (mode_is_signed(mode))
			name = "fix";
		else
			name = "fixuns";
	} else if (!mode_is_float(op_mode)) {
		ir_mode *min_mode;
		if (mode_is_signed(op_mode)) {
			name     = "float";
			min_mode = mode_Is;
		} else {
			name     = "floatun";
			min_mode = mode_Iu;
		}
		if (get_mode_size_bits(op_mode) < get_mode_size_bits(min_mode)) {
			op_mode = min_mode;
			op      = new_rd_Conv(dbgi, block, op, op_mode);
		}
	} else {
		/* Remove unnecessary Convs. */
		if (op_mode == mode) {
			exchange(n, op);
			return true;
		}
		if (get_mode_size_bits(op_mode) > get_mode_size_bits(mode))
			name = "trunc";
		else
			name = "extend";
	}

	ir_node *const in[]   = { op };
	ir_node       *result = make_softfloat_call(n, name, ARRAY_SIZE(in), in);

	/* Check whether we need a Conv for the result. */
	if (get_irn_mode(result) != mode)
		result = new_rd_Conv(dbgi, block, result, mode);

	exchange(n, result);
	return true;
}
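
For illustration (assuming make_softfloat_call assembles the final symbol from the prefix chosen here plus the usual libgcc mode suffixes): converting a 32-bit signed integer to a double would pick name = "float" and end up calling a routine like __floatsidf, while a double-to-float Conv would pick "trunc" and call __truncdfsf2.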
Example #5
unsigned get_type_alignment_bytes(ir_type *tp)
{
	unsigned align = 1;
	if (tp->align > 0)
		return tp->align;

	/* alignment NOT set, calculate it "on demand" */
	if (tp->mode)
		align = (get_mode_size_bits(tp->mode) + 7) >> 3;
	else if (is_Array_type(tp))
Example #6
static void emit_register_mode(const arch_register_t *reg, const ir_mode *mode)
{
	const char *name;
	switch (get_mode_size_bits(mode)) {
	case 8:  name = get_8bit_name(reg);  break;
	case 16: name = get_16bit_name(reg); break;
	case 32: name = get_32bit_name(reg); break;
	case 64: name = reg->name;           break;
	default:
		panic("invalid mode");
	}
	be_emit_char('%');
	be_emit_string(name);
}
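
Assuming the usual AMD64 sub-register naming helpers, the register rax, for example, comes out as %al, %ax, %eax or %rax depending on whether the mode is 8, 16, 32 or 64 bits wide.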
Example #7
static void amd64_emit_mode_suffix(const ir_mode *mode)
{
	assert(mode_is_int(mode) || mode_is_reference(mode));
	char c;
	switch (get_mode_size_bits(mode)) {
	case 8:  c = 'b'; break;
	case 16: c = 'w'; break;
	case 32: c = 'l'; break;
	case 64: c = 'q'; break;
	default:
		panic("Can't output mode_suffix for %+F", mode);
	}
	be_emit_char(c);
}
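
A 32-bit integer mode therefore yields the suffix 'l' and a 64-bit mode 'q', producing the usual AT&T mnemonics such as movl and movq once the emitter prints the suffix after the opcode.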
Example #8
static unsigned determine_n_float_regs(ir_mode *mode)
{
	unsigned bits = get_mode_size_bits(mode);
	switch (bits) {
	case 32:
		return 1;
	case 64:
		return 2;
	case 128:
		return 4;
	default:
		panic("unexpected floatingpoint mode %+F", mode);
	}
}
Example #9
static bool tarval_possible(ir_tarval *tv)
{
	ir_mode *mode = get_tarval_mode(tv);
	if (get_mode_size_bits(mode) <= 32) {
		assert(tarval_is_long(tv));
		return true;
	}

	if (!tarval_is_long(tv))
		return false;
	/* immediates on x86_64 are at most 32bit and get sign extended */
	long    val   = get_tarval_long(tv);
	int32_t val32 = (int32_t)val;
	return val == (long)val32;
}
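
As a worked example: the 64-bit value 0xFFFFFFFF truncates to the 32-bit value -1, which sign-extends back to -1 rather than 0xFFFFFFFF, so it is rejected as an immediate; -1 itself (and any value that survives the truncate/sign-extend round trip) is accepted.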
Example #10
/* Evaluate the costs of an instruction. */
int ia32_evaluate_insn(insn_kind kind, const ir_mode *mode, ir_tarval *tv)
{
	int cost;

	switch (kind) {
	case MUL:
		cost = arch_costs->cost_mul_start;
		if (arch_costs->cost_mul_bit > 0)
			cost += get_tarval_popcount(tv) * arch_costs->cost_mul_bit;
		if (get_mode_size_bits(mode) <= 32)
			return cost;
		/* 64bit mul supported, costs approx. 4 times as much as a 32bit mul */
		return 4 * cost;
	case LEA:
		/* lea is only supported for 32 bit */
		if (get_mode_size_bits(mode) <= 32)
			return arch_costs->lea_cost;
		/* in 64bit mode, the Lea cost is at worst 2 shifts and one add per 32bit half */
		return 2 * arch_costs->add_cost + 2 * (2 * arch_costs->const_shf_cost);
	case ADD:
	case SUB:
		if (get_mode_size_bits(mode) <= 32)
			return arch_costs->add_cost;
		/* 64bit add/sub supported, double the cost */
		return 2 * arch_costs->add_cost;
	case SHIFT:
		if (get_mode_size_bits(mode) <= 32)
			return arch_costs->const_shf_cost;
		/* 64bit shift supported, double the cost */
		return 2 * arch_costs->const_shf_cost;
	case ZERO:
		return arch_costs->add_cost;
	default:
		return 1;
	}
}
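
As a small worked example: if cost_mul_bit is positive, a MUL by the 32-bit constant 5 (two set bits) is estimated as cost_mul_start + 2 * cost_mul_bit, and the same multiplication in a 64-bit mode is rated four times as expensive.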
Example #11
/**
 * Returns the condensed representation of the tarval tv
 */
static unsigned char *value_to_condensed(mul_env *env, ir_tarval *tv, int *pr)
{
	ir_mode       *mode = get_tarval_mode(tv);
	unsigned       bits = get_mode_size_bits(mode);
	unsigned char *R    = OALLOCN(&env->obst, unsigned char, bits);

	int r = 0;
	for (unsigned i = 0, l = 0; i < bits; ++i) {
		if (tarval_get_bit(tv, i)) {
			R[r] = i - l;
			l = i;
			++r;
		}
	}

	*pr = r;
	return R;
}
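
A worked example: for a tarval with value 22 (binary 10110, bits 1, 2 and 4 set) the loop produces R = {1, 1, 2} and *pr = 3: the first entry is the position of the lowest set bit, each further entry the distance to the previous set bit.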
Example #12
static ir_node *alloc_dims_array(unsigned dims, ir_node **sizes)
{
	ir_mode *dim_mode   = mode_ushort;
	unsigned bytes      = dims * (get_mode_size_bits(dim_mode) / 8);
	ir_node *dims_const = new_Const_long(dim_mode, bytes);
	ir_node *mem        = get_store();
	ir_node *alloc      = new_Alloc(mem, dims_const, 1);
	ir_node *arr        = new_Proj(alloc, mode_reference, pn_Alloc_res);
	ir_node *new_mem    = new_Proj(alloc, mode_M, pn_Alloc_M);

	for (unsigned d = 0; d < dims; d++) {
		ir_node *index_const = new_Const_long(mode_int, d);
		ir_node *sel         = new_Sel(arr, index_const, type_array_int);
		ir_node *store       = new_Store(new_mem, sel, sizes[d], type_array_int, cons_none);
		new_mem = new_Proj(store, mode_M, pn_Store_M);
	}

	set_store(new_mem);
	return arr;
}
Example #13
bool enum_bitfield_big_enough(enum_t *enume, type_t *base_type,
                              unsigned bitfield_size)
{
	ir_mode    *mode        = get_ir_mode_storage(base_type);
	ir_tarval  *max         = get_mode_max(mode);
	ir_tarval  *min         = get_mode_min(mode);
	bool       is_signed    = is_type_signed(base_type);
	unsigned   mode_size    = get_mode_size_bits(mode);
	unsigned   shift_amount = mode_size - bitfield_size + is_signed;
	ir_tarval *adjusted_max;
	ir_tarval *adjusted_min;
	/* corner case: signed mode with just sign bit results in shift_amount
	 * being as big as mode_size triggering "modulo shift" which is not what
	 * we want here. */
	if (shift_amount >= mode_size) {
		assert(bitfield_size == 1 && mode_is_signed(mode));
		adjusted_max = get_mode_null(mode);
		adjusted_min = get_mode_all_one(mode);
	} else {
		adjusted_max = tarval_shr_unsigned(max, shift_amount);
		adjusted_min = tarval_shrs_unsigned(min, shift_amount);
	}

	for (entity_t *entry = enume->first_value;
	     entry != NULL && entry->kind == ENTITY_ENUM_VALUE;
	     entry = entry->base.next) {
		ir_tarval *tv = get_enum_value(&entry->enum_value);
		if (tv == NULL)
			continue;
		ir_tarval *tvc = tarval_convert_to(tv, mode);
		if (tarval_cmp(tvc, adjusted_min) == ir_relation_less
		 || tarval_cmp(tvc, adjusted_max) == ir_relation_greater) {
			return false;
		}
	}
	return true;
}
Example #14
/**
 * @return The type of the function replacing the given node.
 */
static ir_type *get_softfloat_type(const ir_node *n)
{
	ir_node *operand      = get_irn_n(n, 0);
	ir_mode *operand_mode = get_irn_mode(operand);

	switch (get_irn_opcode(n)) {
	case iro_Div:
		operand_mode = get_irn_mode(get_Div_left(n));
		/* fall through */
	case iro_Add:
	case iro_Mul:
	case iro_Sub:
		if (operand_mode == mode_F)
			return binop_tp_f;
		else if (operand_mode == mode_D)
			return binop_tp_d;
		break;
	case iro_Cmp:
		if (operand_mode == mode_F)
			return cmp_tp_f;
		else if (operand_mode == mode_D)
			return cmp_tp_d;
		break;

	case iro_Conv: {
		ir_mode *const mode = get_irn_mode(n);
		if (operand_mode == mode_D) {
			if (mode == mode_F)
				return unop_tp_d_f;
			else if (get_mode_arithmetic(mode) == irma_twos_complement) {
				if (get_mode_size_bits(mode) <= 32)
					return mode_is_signed(mode) ? unop_tp_d_is : unop_tp_d_iu;
				else if (get_mode_size_bits(mode) == 64)
					return mode_is_signed(mode) ? unop_tp_d_ls : unop_tp_d_lu;
			}
		} else if (operand_mode == mode_F) {
			if (mode == mode_D)
				return unop_tp_f_d;
			else if (get_mode_arithmetic(mode) == irma_twos_complement) {
				if (get_mode_size_bits(mode) <= 32)
					return mode_is_signed(mode) ? unop_tp_f_is : unop_tp_f_iu;
				else if (get_mode_size_bits(mode) == 64)
					return mode_is_signed(mode) ? unop_tp_f_ls : unop_tp_f_lu;
			}
		} else if (get_mode_arithmetic(operand_mode) == irma_twos_complement) {
			if (mode_is_signed(operand_mode)) {
				if (get_mode_size_bits(operand_mode) <= 32) {
					if (mode == mode_D)
						return unop_tp_is_d;
					else if (mode == mode_F)
						return unop_tp_is_f;
				} else if (get_mode_size_bits(operand_mode) == 64) {
					if (mode == mode_D)
						return unop_tp_ls_d;
					else if (mode == mode_F)
						return unop_tp_ls_f;
				}
			} else {
				if (get_mode_size_bits(operand_mode) <= 32) {
					if (mode == mode_D)
						return unop_tp_iu_d;
					else if (mode == mode_F)
						return unop_tp_iu_f;
				} else if (get_mode_size_bits(operand_mode) == 64) {
					if (mode == mode_D)
						return unop_tp_lu_d;
					else if (mode == mode_F)
						return unop_tp_lu_f;
				}
			}
		}
		break;
	}

	case iro_Minus:
		if (operand_mode == mode_F)
			return unop_tp_f;
		else if (operand_mode == mode_D)
			return unop_tp_d;
		break;
	default: break;
	}

	panic("could not determine a suitable type");
}
Example #15
/**
 * lower 64bit conversions
 */
static void ia32_lower_conv64(ir_node *node, ir_mode *mode)
{
	dbg_info  *dbg       = get_irn_dbg_info(node);
	ir_node   *op        = get_Conv_op(node);
	ir_mode   *mode_from = get_irn_mode(op);
	ir_mode   *mode_to   = get_irn_mode(node);

	if (mode_is_float(mode_from) && get_mode_size_bits(mode_to) == 64
	    && get_mode_arithmetic(mode_to) == irma_twos_complement) {
		/* We have a Conv float -> long long here */
		ir_node *float_to_ll;
		ir_node *l_res;
		ir_node *h_res;
		if (mode_is_signed(mode)) {
			/* convert from float to signed 64bit */
			ir_node *block = get_nodes_block(node);
			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, block, op);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
			                   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
							   pn_ia32_l_FloattoLL_res_high);
		} else {
			/* Convert from float to unsigned 64bit. */
			ir_graph  *irg = get_irn_irg(node);
			ir_tarval *flt_tv
				= new_tarval_from_str("9223372036854775808", 19, x86_mode_E);
			ir_node   *flt_corr  = new_r_Const(irg, flt_tv);

			ir_node *lower_blk = part_block_dw(node);
			ir_node *upper_blk = get_nodes_block(node);
			set_dw_control_flow_changed();

			ir_node *opc  = new_rd_Conv(dbg, upper_blk, op, x86_mode_E);
			ir_node *cmp  = new_rd_Cmp(dbg, upper_blk, opc, flt_corr,
			                           ir_relation_less);
			ir_node *cond = new_rd_Cond(dbg, upper_blk, cmp);
			ir_node *in[] = {
				new_r_Proj(cond, mode_X, pn_Cond_true),
				new_r_Proj(cond, mode_X, pn_Cond_false)
			};
			ir_node *blk   = new_r_Block(irg, 1, &in[1]);
			in[1] = new_r_Jmp(blk);

			set_irn_in(lower_blk, 2, in);

			/* create two Phis */
			ir_node *phi_in[] = {
				new_r_Const_null(irg, mode),
				new_r_Const_long(irg, mode, 0x80000000)
			};
			ir_node *int_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(phi_in), phi_in, mode);

			ir_node *fphi_in[] = {
				opc,
				new_rd_Sub(dbg, upper_blk, opc, flt_corr, x86_mode_E)
			};
			ir_node *flt_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(fphi_in), fphi_in,
				            x86_mode_E);

			/* fix Phi links for next part_block() */
			if (is_Phi(int_phi))
				add_Block_phi(lower_blk, int_phi);
			if (is_Phi(flt_phi))
				add_Block_phi(lower_blk, flt_phi);

			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, lower_blk, flt_phi);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
							   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
							   pn_ia32_l_FloattoLL_res_high);
			h_res = new_rd_Add(dbg, lower_blk, h_res, int_phi, mode);

			/* move the call and its Proj's to the lower block */
			set_nodes_block(node, lower_blk);
			for (ir_node *proj = (ir_node*)get_irn_link(node); proj != NULL;
			     proj = (ir_node*)get_irn_link(proj)) {
				set_nodes_block(proj, lower_blk);
			}
		}
		ir_set_dw_lowered(node, l_res, h_res);
	} else if (get_mode_size_bits(mode_from) == 64
	           && get_mode_arithmetic(mode_from) == irma_twos_complement
	           && mode_is_float(mode_to)) {
		/* We have a Conv long long -> float here */
		ir_node *op_low  = get_lowered_low(op);
		ir_node *op_high = get_lowered_high(op);
		ir_node *block   = get_nodes_block(node);
		ir_node *ll_to_float
			= new_bd_ia32_l_LLtoFloat(dbg, block, op_high, op_low, mode_to);

		exchange(node, ll_to_float);
	} else {
		ir_default_lower_dw_Conv(node, mode);
	}
}
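
The unsigned branch above implements the usual compare-and-correct scheme: flt_corr holds 2^63 (9223372036854775808). Values below it go through the signed FloattoLL unchanged; values at or above it have 2^63 subtracted before the conversion and 0x80000000 added to the high result word afterwards, which restores the original unsigned 64-bit value.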
Example #16
/** check, whether a mode allows a Mulh instruction. */
static int allow_Mulh(const ir_settings_arch_dep_t *params, ir_mode *mode)
{
	if (get_mode_size_bits(mode) > params->max_bits_for_mulh)
		return 0;
	return mode_is_signed(mode) ? params->allow_mulhs : params->allow_mulhu;
}
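
On a target whose max_bits_for_mulh is 32 (the usual setting for 32-bit machines), a 64-bit multiplication is therefore never turned into a Mulh, while a 32-bit one is allowed or rejected via allow_mulhs/allow_mulhu depending on its signedness.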
Example #17
int smaller_mode(const ir_mode *sm, const ir_mode *lm)
{
    int sm_bits, lm_bits;

    assert(sm);
    assert(lm);

    if (sm == lm) return 1;

    sm_bits = get_mode_size_bits(sm);
    lm_bits = get_mode_size_bits(lm);

    switch (get_mode_sort(sm)) {
    case irms_int_number:
        switch (get_mode_sort(lm)) {
        case irms_int_number:
            if (get_mode_arithmetic(sm) != get_mode_arithmetic(lm))
                return 0;

            /* only two's complement implemented */
            assert(get_mode_arithmetic(sm) == irma_twos_complement);

            /* integers are convertible if
             *   - both have the same sign and lm is at least as large
             *   - lm is the signed one and is at least one bit larger
             *     (the extra bit holds the sign)
             */
            if (mode_is_signed(sm)) {
                if (!mode_is_signed(lm))
                    return 0;
                return sm_bits <= lm_bits;
            } else {
                if (mode_is_signed(lm)) {
                    return sm_bits < lm_bits;
                }
                return sm_bits <= lm_bits;
            }

        case irms_float_number:
            /* int to float works if the float is large enough */
            return 0;

        default:
            break;
        }
        break;

    case irms_float_number:
        if (get_mode_arithmetic(sm) == get_mode_arithmetic(lm)) {
            if ( (get_mode_sort(lm) == irms_float_number)
                    && (get_mode_size_bits(lm) >= get_mode_size_bits(sm)) )
                return 1;
        }
        break;

    case irms_reference:
        /* do machines with different pointer lengths exist out there? */
        return 0;

    case irms_internal_boolean:
        return mode_is_int(lm);

    default:
        break;
    }

    /* else */
    return 0;
}
Example #18
void amd64_emitf(ir_node const *const node, char const *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);

	be_emit_char('\t');
	for (;;) {
		char const *start = fmt;

		while (*fmt != '%' && *fmt != '\n' && *fmt != '\0')
			++fmt;
		if (fmt != start) {
			be_emit_string_len(start, fmt - start);
		}

		if (*fmt == '\n') {
			be_emit_char('\n');
			be_emit_write_line();
			be_emit_char('\t');
			++fmt;
			continue;
		}

		if (*fmt == '\0')
			break;

		++fmt;
		amd64_emit_mod_t mod = EMIT_NONE;
		for (;;) {
			switch (*fmt) {
			case '#': mod |= EMIT_RESPECT_LS;  break;
			case '^': mod |= EMIT_IGNORE_MODE; break;
			default:
				goto end_of_mods;
			}
			++fmt;
		}
end_of_mods:

		switch (*fmt++) {
			arch_register_t const *reg;

			case '%':
				be_emit_char('%');
				break;

			case 'C': {
				amd64_attr_t const *const attr = get_amd64_attr_const(node);
				/* FIXME: %d is a hack... we must emit 64bit constants, or sign
				 * extended 32bit constants... */
				be_emit_irprintf("$%d", attr->ext.imm_value);
				break;
			}

			case 'D':
				if (*fmt < '0' || '9' < *fmt)
					goto unknown;
				reg = arch_get_irn_register_out(node, *fmt++ - '0');
				goto emit_R;

			case 'E': {
				ir_entity const *const ent = va_arg(ap, ir_entity const*);
				be_gas_emit_entity(ent);
				break;
			}

			case 'L': {
				ir_node *const block = get_cfop_target_block(node);
				be_gas_emit_block_name(block);
				break;
			}

			case 'O': {
				amd64_SymConst_attr_t const *const attr = get_amd64_SymConst_attr_const(node);
				if (attr->fp_offset)
					be_emit_irprintf("%d", attr->fp_offset);
				break;
			}

			case 'R':
				reg = va_arg(ap, arch_register_t const*);
emit_R:
				if (mod & EMIT_IGNORE_MODE) {
					emit_register(reg);
				} else {
					amd64_attr_t const *const attr = get_amd64_attr_const(node);
					if (mod & EMIT_RESPECT_LS) {
						emit_register_mode(reg, attr->ls_mode);
					} else {
						emit_register_insn_mode(reg, attr->data.insn_mode);
					}
				}
				break;

			case 'S': {
				int pos;
				if ('0' <= *fmt && *fmt <= '9') {
					pos = *fmt++ - '0';
				} else {
					goto unknown;
				}
				reg = arch_get_irn_register_in(node, pos);
				goto emit_R;
			}

			case 'M': {
				amd64_attr_t const *const attr = get_amd64_attr_const(node);
				if (mod & EMIT_RESPECT_LS) {
					amd64_emit_mode_suffix(attr->ls_mode);
				} else {
					amd64_emit_insn_mode_suffix(attr->data.insn_mode);
				}
				break;
			}

			case 'd': {
				int const num = va_arg(ap, int);
				be_emit_irprintf("%d", num);
				break;
			}

			case 's': {
				char const *const str = va_arg(ap, char const*);
				be_emit_string(str);
				break;
			}

			case 'u': {
				unsigned const num = va_arg(ap, unsigned);
				be_emit_irprintf("%u", num);
				break;
			}

			case 'c': {
				amd64_attr_t const *const attr = get_amd64_attr_const(node);
				ir_mode                  *mode = attr->ls_mode;
				if (get_mode_size_bits(mode) == 64)
					break;
				if (get_mode_size_bits(mode) == 32 && !mode_is_signed(mode)
				 && attr->data.insn_mode == INSN_MODE_32)
					break;
				be_emit_char(mode_is_signed(mode) ? 's' : 'z');
				amd64_emit_mode_suffix(mode);
				break;
			}

			default:
unknown:
				panic("unknown format conversion");
		}
	}

	be_emit_finish_line_gas(node);
	va_end(ap);
}
Example #19
/**
 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
 */
static void lower_sel(ir_node *sel)
{
	ir_graph  *irg   = get_irn_irg(sel);
	ir_entity *ent   = get_Sel_entity(sel);
	ir_type   *owner = get_entity_owner(ent);
	dbg_info  *dbg   = get_irn_dbg_info(sel);
	ir_mode   *mode  = get_irn_mode(sel);
	ir_node   *bl    = get_nodes_block(sel);
	ir_node   *newn;

	/* we can only replace Sels when the layout of the owner type is decided. */
	if (get_type_state(owner) != layout_fixed)
		return;

	if (0 < get_Sel_n_indexs(sel)) {
		/* an Array access */
		ir_type *basetyp = get_entity_type(ent);
		ir_mode *basemode;
		ir_node *index;
		if (is_Primitive_type(basetyp))
			basemode = get_type_mode(basetyp);
		else
			basemode = mode_P_data;

		assert(basemode && "no mode for lowering Sel");
		assert((get_mode_size_bits(basemode) % 8 == 0) && "can not deal with unorthodox modes");
		index = get_Sel_index(sel, 0);

		if (is_Array_type(owner)) {
			ir_type *arr_ty = owner;
			size_t   dims   = get_array_n_dimensions(arr_ty);
			size_t  *map    = ALLOCAN(size_t, dims);
			ir_mode *mode_Int = get_reference_mode_signed_eq(mode);
			ir_tarval *tv;
			ir_node *last_size;
			size_t   i;

			assert(dims == (size_t)get_Sel_n_indexs(sel)
				&& "array dimension must match number of indices of Sel node");

			for (i = 0; i < dims; i++) {
				size_t order = get_array_order(arr_ty, i);

				assert(order < dims &&
					"order of a dimension must be smaller than the arrays dim");
				map[order] = i;
			}
			newn = get_Sel_ptr(sel);

			/* Size of the array element */
			tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
			last_size = new_rd_Const(dbg, irg, tv);

			/*
			 * We compute the offset part of dimension d_i recursively
			 * from the offset part of dimension d_{i-1}:
			 *
			 *     off_0 = sizeof(array_element_type);
			 *     off_i = (u_i - l_i) * off_{i-1}  ; i >= 1
			 *
			 * where u_i is the upper bound and l_i the lower bound of the
			 * current dimension.
			 */
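			/*
			 * Illustrative example (not from the original source, assuming
			 * the innermost dimension carries the highest order): for a
			 * two-dimensional array of 4-byte elements whose inner dimension
			 * holds M elements, a[i][j] lowers to
			 *     ptr + j * 4 + i * (4 * M)
			 * i.e. off_0 = 4 for the inner index and off_1 = 4 * M for the
			 * outer one.
			 */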
			for (i = dims; i > 0;) {
				size_t dim = map[--i];
				ir_node *lb, *ub, *elms, *n, *ind;

				elms = NULL;
				lb = get_array_lower_bound(arr_ty, dim);
				ub = get_array_upper_bound(arr_ty, dim);

				if (! is_Unknown(lb))
					lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
				else
					lb = NULL;

				if (! is_Unknown(ub))
					ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
				else
					ub = NULL;

				/*
				 * If the array has more than one dimension, lower and upper
				 * bounds have to be set in the non-last dimension.
				 */
				if (i > 0) {
					assert(lb != NULL && "lower bound has to be set in multi-dim array");
					assert(ub != NULL && "upper bound has to be set in multi-dim array");

					/* Elements in one Dimension */
					elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
				}

				ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);

				/*
				 * Normalize the index: if a lower bound is set, subtract it;
				 * otherwise assume a lower bound of 0.
				 */
				if (lb != NULL)
					ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);

				n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);

				/*
				 * see comment above.
				 */
				if (i > 0)
					last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);

				newn = new_rd_Add(dbg, bl, newn, n, mode);
			}
		} else {
			/* no array type */
			ir_mode   *idx_mode = get_irn_mode(index);
			ir_tarval *tv       = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);

			newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
				new_rd_Mul(dbg, bl, index,
				new_r_Const(irg, tv),
				idx_mode),
				mode);
		}
	} else if (is_Method_type(get_entity_type(ent)) && is_Class_type(owner)) {
		/* We need an additional load when accessing methods from a dispatch
		 * table.
		 * Matze TODO: Is this really still used? At least liboo does its own
		 * lowering of Method-Sels...
		 */
		ir_mode   *ent_mode = get_type_mode(get_entity_type(ent));
		int        offset   = get_entity_offset(ent);
		ir_mode   *mode_Int = get_reference_mode_signed_eq(mode);
		ir_tarval *tv       = new_tarval_from_long(offset, mode_Int);
		ir_node   *cnst     = new_rd_Const(dbg, irg, tv);
		ir_node   *add      = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
		ir_node   *mem      = get_Sel_mem(sel);
		newn = new_rd_Load(dbg, bl, mem, add, ent_mode, cons_none);
		newn = new_r_Proj(newn, ent_mode, pn_Load_res);
	} else {
		int offset = get_entity_offset(ent);

		/* replace Sel by add(obj, const(ent.offset)) */
		newn = get_Sel_ptr(sel);
		if (offset != 0) {
			ir_mode   *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_tarval *tv        = new_tarval_from_long(offset, mode_UInt);
			ir_node   *cnst      = new_r_Const(irg, tv);
			newn = new_rd_Add(dbg, bl, newn, cnst, mode);
		}
	}

	/* run the hooks */
	hook_lower(sel);

	exchange(sel, newn);
}
Example #20
calling_convention_t *arm_decide_calling_convention(const ir_graph *irg,
                                                    ir_type *function_type)
{
	/* determine how parameters are passed */
	unsigned            stack_offset = 0;
	size_t const        n_param_regs = ARRAY_SIZE(param_regs);
	size_t const        n_params     = get_method_n_params(function_type);
	size_t              regnum       = 0;
	reg_or_stackslot_t *params       = XMALLOCNZ(reg_or_stackslot_t, n_params);

	for (size_t i = 0; i < n_params; ++i) {
		ir_type            *param_type = get_method_param_type(function_type,i);
		ir_mode            *mode       = get_type_mode(param_type);
		int                 bits       = get_mode_size_bits(mode);
		reg_or_stackslot_t *param      = &params[i];
		param->type = param_type;

		/* doubleword modes need to be passed in even registers */
		if (param_type->flags & tf_lowered_dw) {
			if (regnum < n_param_regs) {
				if ((regnum & 1) != 0)
					++regnum;
			} else {
				unsigned misalign = stack_offset % 8;
				if (misalign > 0)
					stack_offset += 8 - misalign;
			}
		}

		if (regnum < n_param_regs) {
			param->reg0 = param_regs[regnum++];
		} else {
			param->offset = stack_offset;
			/* increase offset by at least 4 bytes so everything stays aligned */
			stack_offset += MAX(bits / 8, 4);
			continue;
		}

		/* we might need a 2nd 32bit component (for 64bit or double values) */
		if (bits > 32) {
			if (bits > 64)
				panic("only 32 and 64bit modes supported");

			if (regnum < n_param_regs) {
				const arch_register_t *reg = param_regs[regnum++];
				param->reg1 = reg;
			} else {
				ir_mode *pmode = param_regs[0]->cls->mode;
				ir_type *type  = get_type_for_mode(pmode);
				param->type    = type;
				param->offset  = stack_offset;
				assert(get_mode_size_bits(pmode) == 32);
				stack_offset += 4;
			}
		}
	}
	unsigned const n_param_regs_used = regnum;

	size_t const        n_result_regs= ARRAY_SIZE(result_regs);
	size_t const n_float_result_regs = ARRAY_SIZE(float_result_regs);
	size_t              n_results    = get_method_n_ress(function_type);
	size_t              float_regnum = 0;
	reg_or_stackslot_t *results      = XMALLOCNZ(reg_or_stackslot_t, n_results);
	regnum = 0;
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		if (mode_is_float(result_mode)) {
			if (float_regnum >= n_float_result_regs) {
				panic("too many float results");
			} else {
				const arch_register_t *reg = float_result_regs[float_regnum++];
				result->reg0 = reg;
			}
		} else {
			if (get_mode_size_bits(result_mode) > 32) {
				panic("results with more than 32bits not supported yet");
			}

			if (regnum >= n_result_regs) {
				panic("too many results");
			} else {
				const arch_register_t *reg = result_regs[regnum++];
				result->reg0 = reg;
			}
		}
	}

	calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
	cconv->parameters       = params;
	cconv->n_parameters     = n_params;
	cconv->param_stack_size = stack_offset;
	cconv->n_param_regs     = n_param_regs_used;
	cconv->results          = results;

	/* setup allocatable registers */
	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);

		assert(birg->allocatable_regs == NULL);
		birg->allocatable_regs = be_cconv_alloc_all_regs(&birg->obst, N_ARM_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs, ARRAY_SIZE(ignore_regs));
		arm_get_irg_data(irg)->omit_fp = true;
	}

	return cconv;
}
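
To illustrate the parameter rules above (they mirror the AAPCS, assuming the usual param_regs table r0-r3): for a call f(int, long long, int) the first int is passed in r0, the 64-bit value is aligned to the even register pair r2/r3 (r1 stays unused), and the remaining int no longer fits into the parameter registers and is passed on the stack at offset 0.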
Example #21
x86_cconv_t *amd64_decide_calling_convention(ir_type *function_type,
                                             ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
		amd64_get_irg_data(irg)->omit_fp = omit_fp;
	}

	unsigned *caller_saves = rbitset_malloc(N_AMD64_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_AMD64_REGISTERS);
	rbitset_copy(caller_saves, default_caller_saves, N_AMD64_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_AMD64_REGISTERS);

	/* determine how parameters are passed */
	size_t              n_params           = get_method_n_params(function_type);
	size_t              param_regnum       = 0;
	size_t              float_param_regnum = 0;
	reg_or_stackslot_t *params             = XMALLOCNZ(reg_or_stackslot_t,
	                                                   n_params);
	/* The x64 ABI always reserves space to spill the first 4 arguments,
	 * which simplifies handling of variadic functions. */
	unsigned stack_offset = amd64_use_x64_abi ? 32 : 0;
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *param_type = get_method_param_type(function_type,i);
		if (is_compound_type(param_type))
			panic("compound arguments NIY");

		ir_mode *mode = get_type_mode(param_type);
		int      bits = get_mode_size_bits(mode);
		reg_or_stackslot_t *param = &params[i];

		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs
		    && mode != x86_mode_E) {
			param->reg = float_param_regs[float_param_regnum++];
			if (amd64_use_x64_abi) {
				++param_regnum;
			}
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = param_regs[param_regnum++];
			if (amd64_use_x64_abi) {
				++float_param_regnum;
			}
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			/* round the offset up to a multiple of AMD64_REGISTER_SIZE so
			 * everything stays aligned */
			stack_offset += round_up2(bits / 8, AMD64_REGISTER_SIZE);
		}
	}

	/* If the function is variadic, we add all unused parameter
	 * passing registers to the end of the params array, first GP,
	 * then XMM. */
	if (irg && is_method_variadic(function_type)) {
		if (amd64_use_x64_abi) {
			panic("Variadic functions on Windows ABI not supported");
		}

		int params_remaining = (n_param_regs - param_regnum) +
			(n_float_param_regs - float_param_regnum);
		params = XREALLOC(params, reg_or_stackslot_t, n_params + params_remaining);
		size_t i = n_params;

		for (; param_regnum < n_param_regs; param_regnum++, i++) {
			params[i].reg = param_regs[param_regnum];
		}

		for (; float_param_regnum < n_float_param_regs; float_param_regnum++, i++) {
			params[i].reg = float_param_regs[float_param_regnum];
		}
	}

	unsigned n_param_regs_used
		= amd64_use_x64_abi ? param_regnum : param_regnum + float_param_regnum;

	/* determine how results are passed */
	size_t              n_results           = get_method_n_ress(function_type);
	unsigned            n_reg_results       = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned            res_regnum          = 0;
	unsigned            res_float_regnum    = 0;
	unsigned            res_x87_regnum      = 0;
	size_t              n_result_regs       = ARRAY_SIZE(result_regs);
	size_t              n_float_result_regs = ARRAY_SIZE(float_result_regs);
	size_t              n_x87_result_regs   = ARRAY_SIZE(x87_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (result_mode == x86_mode_E) {
			if (res_x87_regnum >= n_x87_result_regs)
				panic("too manu x87 floating point results");
			reg = x87_result_regs[res_x87_regnum++];
		} else if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating points results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	x86_cconv_t *cconv     = XMALLOCZ(x86_cconv_t);
	cconv->parameters      = params;
	cconv->n_parameters    = n_params;
	cconv->param_stacksize = stack_offset;
	cconv->n_param_regs    = n_param_regs_used;
	cconv->n_xmm_regs      = float_param_regnum;
	cconv->results         = results;
	cconv->omit_fp         = omit_fp;
	cconv->caller_saves    = caller_saves;
	cconv->callee_saves    = callee_saves;
	cconv->n_reg_results   = n_reg_results;

	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);

		birg->allocatable_regs = be_cconv_alloc_all_regs(&birg->obst, N_AMD64_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs, ARRAY_SIZE(ignore_regs));
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_RBP);
	}

	return cconv;
}
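
For the SysV case (amd64_use_x64_abi unset) the param_regs and float_param_regs tables, which are not shown here, are expected to hold the ABI's integer argument registers rdi, rsi, rdx, rcx, r8, r9 and the vector registers xmm0-xmm7, so a call f(int, double, int) would receive its arguments in rdi, xmm0 and rsi.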
Example #22
calling_convention_t *sparc_decide_calling_convention(ir_type *function_type,
                                                      ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		/* our current va_arg handling needs the standard save area to store
		 * args 0-5 in it */
		if (is_method_variadic(function_type))
			omit_fp = false;
		/* The pointer to the aggregate return value belongs to the 92 magic bytes.
		 * Thus, if the called function increases the stack size,
		 * it must copy the value to the appropriate location.
		 * This is not implemented yet, so we do not allow omitting the frame
		 * pointer.
		 */
		if (get_method_calling_convention(function_type) & cc_compound_ret)
			omit_fp = false;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
		sparc_get_irg_data(irg)->omit_fp = omit_fp;
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	unsigned *caller_saves = rbitset_malloc(N_SPARC_REGISTERS);
	if (mtp & mtp_property_returns_twice) {
		rbitset_copy(caller_saves, default_returns_twice_saves,
		             N_SPARC_REGISTERS);
	} else {
		rbitset_copy(caller_saves, default_caller_saves, N_SPARC_REGISTERS);
	}

	/* determine how parameters are passed */
	int                 n_params = get_method_n_params(function_type);
	int                 regnum   = 0;
	reg_or_stackslot_t *params   = XMALLOCNZ(reg_or_stackslot_t, n_params);

	int      n_param_regs = ARRAY_SIZE(param_regs);
	unsigned stack_offset = !omit_fp ? SPARC_MIN_STACKSIZE : 0;
	for (int i = 0; i < n_params; ++i) {
		ir_type            *param_type = get_method_param_type(function_type,i);
		ir_mode            *mode;
		int                 bits;
		reg_or_stackslot_t *param;

		if (is_compound_type(param_type))
			panic("compound arguments not supported yet");

		mode  = get_type_mode(param_type);
		bits  = get_mode_size_bits(mode);
		param = &params[i];

		if (i == 0 &&
		    (get_method_calling_convention(function_type) & cc_compound_ret)) {
			assert(mode_is_reference(mode) && bits == 32);
			/* special case, we have reserved space for this on the between
			 * type */
			param->type   = param_type;
			param->offset = SPARC_AGGREGATE_RETURN_OFFSET;
			param->already_stored = true;
			continue;
		}

		if (regnum < n_param_regs) {
			param->offset = SPARC_PARAMS_SPILL_OFFSET
			                + regnum * SPARC_REGISTER_SIZE;
			param->type   = param_type;
			arch_register_t const *reg = param_regs[regnum++];
			if (irg == NULL || omit_fp)
				reg = map_i_to_o_reg(reg);
			param->reg0 = reg;
			param->req0 = reg->single_req;
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			param->already_stored = true;
			/* increase offset by at least SPARC_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += MAX(bits / 8, SPARC_REGISTER_SIZE);
			continue;
		}

		/* we might need a 2nd 32bit component (for 64bit or double values) */
		if (bits > 32) {
			if (bits > 64)
				panic("only 32 and 64bit modes supported");

			if (regnum < n_param_regs) {
				param->offset = SPARC_PARAMS_SPILL_OFFSET
				                + regnum * SPARC_REGISTER_SIZE;
				arch_register_t const *reg = param_regs[regnum++];
				if (irg == NULL || omit_fp)
					reg = map_i_to_o_reg(reg);
				param->reg1 = reg;
				param->req1 = reg->single_req;
			} else {
				ir_mode *regmode = param_regs[0]->cls->mode;
				ir_type *type    = get_type_for_mode(regmode);
				param->type      = type;
				param->offset    = stack_offset;
				assert(get_mode_size_bits(regmode) == 32);
				stack_offset += SPARC_REGISTER_SIZE;
			}
		}
	}
	unsigned n_param_regs_used = regnum;

	/* determine how results are passed */
	int                 n_results           = get_method_n_ress(function_type);
	unsigned            float_regnum        = 0;
	unsigned            n_reg_results       = 0;
	unsigned            n_float_result_regs = ARRAY_SIZE(float_result_regs);
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	regnum        = 0;
	for (int i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		if (mode_is_float(result_mode)) {
			unsigned n_regs   = determine_n_float_regs(result_mode);
			unsigned next_reg = round_up2(float_regnum, n_regs);

			if (next_reg >= n_float_result_regs) {
				panic("too many float results");
			} else {
				const arch_register_t *reg = float_result_regs[next_reg];
				rbitset_clear(caller_saves, reg->global_index);
				if (n_regs == 1) {
					result->req0 = reg->single_req;
				} else if (n_regs == 2) {
					result->req0 = &float_result_reqs_double[next_reg];
					rbitset_clear(caller_saves, reg->global_index+1);
				} else if (n_regs == 4) {
					result->req0 = &float_result_reqs_quad[next_reg];
					rbitset_clear(caller_saves, reg->global_index+1);
					rbitset_clear(caller_saves, reg->global_index+2);
					rbitset_clear(caller_saves, reg->global_index+3);
				} else {
					panic("invalid number of registers in result");
				}
				float_regnum = next_reg + n_regs;

				++n_reg_results;
			}
		} else {
			if (get_mode_size_bits(result_mode) > 32) {
				panic("results with more than 32bits not supported yet");
			}

			if (regnum >= n_param_regs) {
				panic("too many results");
			} else {
				const arch_register_t *reg = param_regs[regnum++];
				if (irg == NULL || omit_fp)
					reg = map_i_to_o_reg(reg);
				result->req0 = reg->single_req;
				rbitset_clear(caller_saves, reg->global_index);
				++n_reg_results;
			}
		}
	}

	calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
	cconv->n_parameters     = n_params;
	cconv->parameters       = params;
	cconv->param_stack_size = stack_offset - SPARC_MIN_STACKSIZE;
	cconv->n_param_regs     = n_param_regs_used;
	cconv->results          = results;
	cconv->omit_fp          = omit_fp;
	cconv->caller_saves     = caller_saves;
	cconv->n_reg_results    = n_reg_results;

	/* setup ignore register array */
	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);

		birg->allocatable_regs = be_cconv_alloc_all_regs(&birg->obst, N_SPARC_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs, ARRAY_SIZE(ignore_regs));
	}

	return cconv;
}
Example #23
x86_cconv_t *amd64_decide_calling_convention(ir_type *function_type,
                                             ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	unsigned *caller_saves = rbitset_malloc(N_AMD64_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_AMD64_REGISTERS);
	if (mtp & mtp_property_returns_twice)
		panic("amd64: returns_twice calling convention NIY");
	rbitset_copy(caller_saves, default_caller_saves, N_AMD64_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_AMD64_REGISTERS);

	/* determine how parameters are passed */
	size_t              n_params           = get_method_n_params(function_type);
	size_t              param_regnum       = 0;
	size_t              float_param_regnum = 0;
	reg_or_stackslot_t *params             = XMALLOCNZ(reg_or_stackslot_t,
	                                                   n_params);
	/* The x64 ABI always reserves space to spill the first 4 arguments,
	 * which simplifies handling of variadic functions. */
	unsigned stack_offset = amd64_use_x64_abi ? 32 : 0;
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *param_type = get_method_param_type(function_type,i);
		if (is_compound_type(param_type))
			panic("amd64: compound arguments NIY");

		ir_mode *mode = get_type_mode(param_type);
		int      bits = get_mode_size_bits(mode);
		reg_or_stackslot_t *param = &params[i];

		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs) {
			param->reg = float_param_regs[float_param_regnum++];
			if (amd64_use_x64_abi)
				++param_regnum;
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = param_regs[param_regnum++];
			if (amd64_use_x64_abi)
				++float_param_regnum;
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			/* increase offset by at least AMD64_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += MAX(bits / 8, AMD64_REGISTER_SIZE);
			continue;
		}

	}

	unsigned n_param_regs_used
		= amd64_use_x64_abi ? param_regnum : param_regnum + float_param_regnum;

	/* determine how results are passed */
	size_t              n_results           = get_method_n_ress(function_type);
	unsigned            n_reg_results       = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned            res_regnum          = 0;
	unsigned            res_float_regnum    = 0;
	size_t              n_result_regs       = ARRAY_SIZE(result_regs);
	size_t              n_float_result_regs = ARRAY_SIZE(float_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating points results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	x86_cconv_t *cconv    = XMALLOCZ(x86_cconv_t);
	cconv->parameters     = params;
	cconv->callframe_size = stack_offset;
	cconv->n_param_regs   = n_param_regs_used;
	cconv->n_xmm_regs     = float_param_regnum;
	cconv->results        = results;
	cconv->omit_fp        = omit_fp;
	cconv->caller_saves   = caller_saves;
	cconv->callee_saves   = callee_saves;
	cconv->n_reg_results  = n_reg_results;

	if (irg != NULL) {
		be_irg_t       *birg      = be_birg_from_irg(irg);
		size_t          n_ignores = ARRAY_SIZE(ignore_regs);
		struct obstack *obst      = &birg->obst;

		birg->allocatable_regs = rbitset_obstack_alloc(obst, N_AMD64_REGISTERS);
		rbitset_set_all(birg->allocatable_regs, N_AMD64_REGISTERS);
		for (size_t r = 0; r < n_ignores; ++r) {
			rbitset_clear(birg->allocatable_regs, ignore_regs[r]);
		}
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_RBP);
	}

	return cconv;
}