Example #1
static ir_type *lower_type_if_needed(ir_type *tp)
{
	bool need_lower = false;

	size_t const n_params = get_method_n_params(tp);
	for (size_t p = 0; p < n_params; ++p) {
		ir_type *ptp   = get_method_param_type(tp, p);
		ir_mode *pmode = get_type_mode(ptp);
		if (pmode && mode_is_float(pmode)) {
			need_lower = true;
			break;
		}
	}

	size_t const n_res = get_method_n_ress(tp);
	for (size_t i = 0; i < n_res; ++i) {
		ir_type *rtp   = get_method_res_type(tp, i);
		ir_mode *rmode = get_type_mode(rtp);
		if (rmode && mode_is_float(rmode)) {
			need_lower = true;
			break;
		}
	}

	if (need_lower) {
		return lower_method_type(tp);
	} else {
		return tp;
	}
}
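
A plausible way to drive this helper over an ir_graph, sketched with the standard libFirm walker API (the callback name and the usage line are hypothetical):

/* Hypothetical walker callback: rewrite the type of every Call whose
 * signature mentions a float, using lower_type_if_needed() from above. */
static void fix_call_types(ir_node *node, void *env)
{
	(void)env;
	if (!is_Call(node))
		return;
	set_Call_type(node, lower_type_if_needed(get_Call_type(node)));
}

/* usage: irg_walk_graph(irg, NULL, fix_call_types, NULL); */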
Example #2
/**
 * Transforms a Conv into the appropriate soft float function.
 */
static bool lower_Conv(ir_node *const n)
{
	dbg_info *const dbgi    = get_irn_dbg_info(n);
	ir_node  *const block   = get_nodes_block(n);
	ir_mode  *const mode    = get_irn_mode(n);
	ir_node        *op      = get_Conv_op(n);
	ir_mode        *op_mode = get_irn_mode(op);

	char const *name;
	if (!mode_is_float(mode)) {
		if (!mode_is_float(op_mode))
			return false;
		if (mode_is_signed(mode))
			name = "fix";
		else
			name = "fixuns";
	} else if (!mode_is_float(op_mode)) {
		ir_mode *min_mode;
		if (mode_is_signed(op_mode)) {
			name     = "float";
			min_mode = mode_Is;
		} else {
			name     = "floatun";
			min_mode = mode_Iu;
		}
		if (get_mode_size_bits(op_mode) < get_mode_size_bits(min_mode)) {
			op_mode = min_mode;
			op      = new_rd_Conv(dbgi, block, op, op_mode);
		}
	} else {
		/* Remove unnecessary Convs. */
		if (op_mode == mode) {
			exchange(n, op);
			return true;
		}
		if (get_mode_size_bits(op_mode) > get_mode_size_bits(mode))
			name = "trunc";
		else
			name = "extend";
	}

	ir_node *const in[]   = { op };
	ir_node       *result = make_softfloat_call(n, name, ARRAY_SIZE(in), in);

	/* Check whether we need a Conv for the result. */
	if (get_irn_mode(result) != mode)
		result = new_rd_Conv(dbgi, block, result, mode);

	exchange(n, result);
	return true;
}
Example #3
ir_tarval *get_mode_infinite(ir_mode *mode)
{
    assert(mode);
    assert(mode_is_float(mode));

    return get_tarval_plus_inf(mode);
}
Example #4
int values_in_mode(const ir_mode *sm, const ir_mode *lm)
{
    assert(sm != NULL);
    assert(lm != NULL);
    if (sm == lm)
        return true;

    if (sm == mode_b)
        return mode_is_int(lm) || mode_is_float(lm);

    ir_mode_arithmetic larith = get_mode_arithmetic(lm);
    ir_mode_arithmetic sarith = get_mode_arithmetic(sm);
    switch (larith) {
    case irma_x86_extended_float:
    case irma_ieee754:
        if (sarith == irma_ieee754 || sarith == irma_x86_extended_float) {
            return get_mode_size_bits(sm) <= get_mode_size_bits(lm);
        } else if (sarith == irma_twos_complement) {
            unsigned int_mantissa
                = get_mode_size_bits(sm) - (mode_is_signed(sm) ? 1 : 0);
            unsigned float_mantissa = get_mode_mantissa_size(lm) + 1;
            return int_mantissa <= float_mantissa;
        }
        break;
    case irma_twos_complement:
        if (sarith == irma_twos_complement)
            return get_mode_size_bits(sm) <= get_mode_size_bits(lm);
        break;
    case irma_none:
        break;
    }
    return false;
}
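
Two worked cases for the mantissa check above, as a sketch (standard libFirm modes; the usual IEEE 754 mantissa sizes of 23+1 and 52+1 bits):

#include <assert.h>

static void values_in_mode_examples(void)
{
    /* mode_Is (signed 32 bit) -> mode_D (IEEE double): 31 value bits fit
     * into the 53-bit mantissa, so the conversion is lossless. */
    assert(values_in_mode(mode_Is, mode_D));
    /* mode_Is -> mode_F (IEEE single): 31 value bits exceed the 24-bit
     * mantissa, so values can be lost. */
    assert(!values_in_mode(mode_Is, mode_F));
}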
Example #5
ir_tarval *get_mode_NAN(ir_mode *mode)
{
    assert(mode);
    assert(mode_is_float(mode));

    return get_tarval_nan(mode);
}
Example #6
ir_mode *get_complex_mode_arithmetic(type_t *type)
{
	ir_mode *mode = get_complex_mode_storage(type);
	if (mode_is_float(mode) && mode_float_arithmetic != NULL) {
		return mode_float_arithmetic;
	}

	return mode;
}
Example #7
int smaller_mode(const ir_mode *sm, const ir_mode *lm)
{
    assert(sm != NULL);
    assert(lm != NULL);
    if (sm == lm) return true;

    switch (get_mode_sort(sm)) {
    case irms_int_number:
        switch (get_mode_sort(lm)) {
        case irms_int_number:
            if (get_mode_arithmetic(sm) != get_mode_arithmetic(lm))
                return false;

            /* only two's complement implemented */
            assert(get_mode_arithmetic(sm) == irma_twos_complement);

            /* integers are convertible if
             * - both have the same sign and lm is the larger one
             * - lm is signed and is at least one bit larger (the sign) */
            unsigned sm_bits = get_mode_size_bits(sm);
            unsigned lm_bits = get_mode_size_bits(lm);
            if (mode_is_signed(sm)) {
                if (!mode_is_signed(lm))
                    return false;
            } else {
                if (mode_is_signed(lm))
                    return sm_bits < lm_bits;
            }
            return sm_bits <= lm_bits;

        case irms_auxiliary:
        case irms_data:
        case irms_internal_boolean:
        case irms_reference:
        case irms_float_number:
            /* an int mode never counts as "smaller" than a float mode
             * here; values_in_mode() covers lossless int -> float */
            return false;
        }
        panic("invalid mode_sort");

    case irms_float_number:
        return get_mode_arithmetic(sm) == get_mode_arithmetic(lm)
               && mode_is_float(lm)
               && get_mode_size_bits(lm) >= get_mode_size_bits(sm);

    case irms_auxiliary:
    case irms_data:
    case irms_internal_boolean:
    case irms_reference:
        /* do machines with different pointer lengths exist? */
        return false;
    }

    panic("invalid mode_sort");
}
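
The integer rule above in three concrete cases, assuming the standard libFirm modes:

static void smaller_mode_examples(void)
{
    assert(smaller_mode(mode_Hs, mode_Is));  /* signed 16 -> signed 32 */
    assert(smaller_mode(mode_Hu, mode_Is));  /* unsigned 16 -> signed 32:
                                              * the sign bit leaves room */
    assert(!smaller_mode(mode_Iu, mode_Is)); /* unsigned 32 -> signed 32:
                                              * the top bit has no home  */
}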
Example #8
/**
 * @return For a floating point mode, the integer mode of the same size;
 *         any other mode is returned unchanged.
 */
static ir_mode *get_lowered_mode(ir_mode *mode)
{
	if (!mode_is_float(mode))
		return mode;

	if (mode == mode_F)
		return mode_Iu;
	else if (mode == mode_D)
		return mode_Lu;

	panic("unsupported floating point type");
}
Example #9
/**
 * @return The lowered method type.
 */
static ir_type *lower_method_type(ir_type *mtp)
{
	ir_type *res = pmap_get(ir_type, lowered_type, mtp);
	if (res != NULL)
		return res;

	size_t const n_param     = get_method_n_params(mtp);
	size_t const n_res       = get_method_n_ress(mtp);
	bool   const is_variadic = is_method_variadic(mtp);
	res = new_type_method(n_param, n_res, is_variadic);

	/* set param types and result types */
	for (size_t i = 0; i < n_param; ++i) {
		ir_type *ptp   = get_method_param_type(mtp, i);
		ir_mode *pmode = get_type_mode(ptp);

		if (pmode != NULL && mode_is_float(pmode)) {
			ptp = lower_type(ptp);
		}

		set_method_param_type(res, i, ptp);
	}
	for (size_t i = 0; i < n_res; ++i) {
		ir_type *rtp   = get_method_res_type(mtp, i);
		ir_mode *rmode = get_type_mode(rtp);

		if (rmode != NULL && mode_is_float(rmode)) {
			rtp = lower_type(rtp);
		}

		set_method_res_type(res, i, rtp);
	}

	copy_method_properties(res, mtp);

	set_higher_type(res, mtp);

	pmap_insert(lowered_type, mtp, res);
	return res;
}
Example #10
/**
 * Transforms an Add into the appropriate soft float function.
 */
static bool lower_Add(ir_node *const n)
{
	ir_mode *const mode = get_irn_mode(n);
	if (!mode_is_float(mode))
		return false;

	ir_node *const left   = get_Add_left(n);
	ir_node *const right  = get_Add_right(n);
	ir_node *const in[]   = { left, right };
	ir_node *const result = make_softfloat_call(n, "add", ARRAY_SIZE(in), in);
	exchange(n, result);
	return true;
}
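
All of these lower_* handlers share this shape, so a plausible driver (a sketch, not taken from the source) dispatches on the opcode and records whether anything changed:

/* Hypothetical post-walker callback dispatching to the lower_* handlers
 * shown in these examples. */
static void lower_node(ir_node *node, void *env)
{
	bool *changed = (bool*)env;
	switch (get_irn_opcode(node)) {
	case iro_Add:   *changed |= lower_Add(node);   break;
	case iro_Div:   *changed |= lower_Div(node);   break;
	case iro_Cmp:   *changed |= lower_Cmp(node);   break;
	case iro_Conv:  *changed |= lower_Conv(node);  break;
	case iro_Const: *changed |= lower_Const(node); break;
	default:        break;
	}
}

/* usage: bool changed = false;
 *        irg_walk_graph(irg, NULL, lower_node, &changed); */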
Example #11
/**
 * Adapts floating point constants.
 */
static bool lower_Const(ir_node *const n)
{
	ir_mode *mode = get_irn_mode(n);
	if (!mode_is_float(mode))
		return false;

	ir_tarval *float_tv     = get_Const_tarval(n);
	ir_mode   *lowered_mode = get_lowered_mode(mode);
	ir_tarval *int_tv       = tarval_bitcast(float_tv, lowered_mode);

	set_irn_mode(n, lowered_mode);
	set_Const_tarval(n, int_tv);
	return true;
}
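
A worked instance of the bitcast step, as a sketch (the bit pattern is the standard IEEE 754 encoding of 1.0f; new_tarval_from_double and new_r_Const are the usual libFirm constructors, get_lowered_mode is Example #8):

static ir_node *lowered_one_example(ir_graph *irg)
{
	/* 1.0f in mode_F has the bit pattern 0x3f800000, so the lowered
	 * node is an integer Const carrying exactly that value in mode_Iu. */
	ir_tarval *one_f    = new_tarval_from_double(1.0, mode_F);
	ir_tarval *one_bits = tarval_bitcast(one_f, get_lowered_mode(mode_F));
	return new_r_Const(irg, one_bits); /* Const 0x3f800000, mode_Iu */
}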
Example #12
/**
 * construct an interval from a value
 *
 * @return the filled interval or NULL if no interval
 *         can be created (happens only for floating point values)
 */
static interval_t *get_interval_from_tv(interval_t *iv, ir_tarval *tv)
{
	ir_mode *mode = get_tarval_mode(tv);

	if (tv == tarval_bad) {
		if (mode_is_float(mode)) {
			/* NaN could be included which we cannot handle */
			iv->min   = tarval_bad;
			iv->max   = tarval_bad;
			iv->flags = MIN_EXCLUDED | MAX_EXCLUDED;
			return NULL;
		} else {
			/* [-oo, +oo] */
			iv->min   = get_mode_min(mode);
			iv->max   = get_mode_max(mode);
			iv->flags = MIN_INCLUDED | MAX_INCLUDED;
			return iv;
		}
	}

	if (mode_is_float(mode)) {
		if (tv == get_mode_NAN(mode)) {
			/* argh, we cannot handle NaNs. */
			iv->min   = tarval_bad;
			iv->max   = tarval_bad;
			iv->flags = MIN_EXCLUDED | MAX_EXCLUDED;
			return NULL;
		}
	}

	/* [tv, tv] */
	iv->min   = tv;
	iv->max   = tv;
	iv->flags = MIN_INCLUDED | MAX_INCLUDED;

	return iv;
}
Example #13
static bool lower_Bitcast(ir_node *const n)
{
	/* The Bitcast was casting float->int or int->float; we can simply
	 * replace it with a Conv (between integer modes) now. */
	ir_node *op       = get_Bitcast_op(n);
	ir_mode *src_mode = get_irn_mode(op);
	ir_mode *dst_mode = get_irn_mode(n);
	/* note that the predecessor may already be transformed, so it's
	 * possible that we don't see a float mode anymore. */
	if (mode_is_float(dst_mode))
		dst_mode = get_lowered_mode(dst_mode);
	ir_node *res = op;
	if (src_mode != dst_mode) {
		dbg_info *dbgi  = get_irn_dbg_info(n);
		ir_node  *block = get_nodes_block(n);
		res = new_rd_Conv(dbgi, block, op, dst_mode);
	}
	exchange(n, res);
	return true;
}
Example #14
/**
 * Transforms a Div into the appropriate soft float function.
 */
static bool lower_Div(ir_node *const n)
{
	ir_mode *const mode = get_Div_resmode(n);
	if (!mode_is_float(mode))
		return false;

	ir_node *const left   = get_Div_left(n);
	ir_node *const right  = get_Div_right(n);
	ir_node *const in[]   = { left, right };
	ir_node *const result = make_softfloat_call(n, "div", ARRAY_SIZE(in), in);
	ir_node *const call   = skip_Proj(skip_Proj(result));
	set_irn_pinned(call, get_irn_pinned(n));

	foreach_out_edge_safe(n, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

		switch ((pn_Div)get_Proj_num(proj)) {
		case pn_Div_M:
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_M);
			continue;
		case pn_Div_X_regular:
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_X_regular);
			continue;
		case pn_Div_X_except:
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_X_except);
			continue;
		case pn_Div_res:
			exchange(proj, result);
			continue;
		}
		panic("unexpected Proj number");
	}

	return true;
}
Example #15
/**
 * lower 64bit conversions
 */
static void ia32_lower_conv64(ir_node *node, ir_mode *mode)
{
	dbg_info  *dbg       = get_irn_dbg_info(node);
	ir_node   *op        = get_Conv_op(node);
	ir_mode   *mode_from = get_irn_mode(op);
	ir_mode   *mode_to   = get_irn_mode(node);

	if (mode_is_float(mode_from) && get_mode_size_bits(mode_to) == 64
	    && get_mode_arithmetic(mode_to) == irma_twos_complement) {
		/* We have a Conv float -> long long here */
		ir_node *float_to_ll;
		ir_node *l_res;
		ir_node *h_res;
		if (mode_is_signed(mode)) {
			/* convert from float to signed 64bit */
			ir_node *block = get_nodes_block(node);
			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, block, op);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
			                   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
							   pn_ia32_l_FloattoLL_res_high);
		} else {
			/* Convert from float to unsigned 64bit. */
			ir_graph  *irg = get_irn_irg(node);
			ir_tarval *flt_tv
				= new_tarval_from_str("9223372036854775808", 19, x86_mode_E);
			ir_node   *flt_corr  = new_r_Const(irg, flt_tv);

			ir_node *lower_blk = part_block_dw(node);
			ir_node *upper_blk = get_nodes_block(node);
			set_dw_control_flow_changed();

			ir_node *opc  = new_rd_Conv(dbg, upper_blk, op, x86_mode_E);
			ir_node *cmp  = new_rd_Cmp(dbg, upper_blk, opc, flt_corr,
			                           ir_relation_less);
			ir_node *cond = new_rd_Cond(dbg, upper_blk, cmp);
			ir_node *in[] = {
				new_r_Proj(cond, mode_X, pn_Cond_true),
				new_r_Proj(cond, mode_X, pn_Cond_false)
			};
			ir_node *blk   = new_r_Block(irg, 1, &in[1]);
			in[1] = new_r_Jmp(blk);

			set_irn_in(lower_blk, 2, in);

			/* create the two Phis */
			ir_node *phi_in[] = {
				new_r_Const_null(irg, mode),
				new_r_Const_long(irg, mode, 0x80000000)
			};
			ir_node *int_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(phi_in), phi_in, mode);

			ir_node *fphi_in[] = {
				opc,
				new_rd_Sub(dbg, upper_blk, opc, flt_corr, x86_mode_E)
			};
			ir_node *flt_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(fphi_in), fphi_in,
				            x86_mode_E);

			/* fix Phi links for next part_block() */
			if (is_Phi(int_phi))
				add_Block_phi(lower_blk, int_phi);
			if (is_Phi(flt_phi))
				add_Block_phi(lower_blk, flt_phi);

			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, lower_blk, flt_phi);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
							   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
							   pn_ia32_l_FloattoLL_res_high);
			h_res = new_rd_Add(dbg, lower_blk, h_res, int_phi, mode);

			/* move the call and its Projs to the lower block */
			set_nodes_block(node, lower_blk);
			for (ir_node *proj = (ir_node*)get_irn_link(node); proj != NULL;
			     proj = (ir_node*)get_irn_link(proj)) {
				set_nodes_block(proj, lower_blk);
			}
		}
		ir_set_dw_lowered(node, l_res, h_res);
	} else if (get_mode_size_bits(mode_from) == 64
	           && get_mode_arithmetic(mode_from) == irma_twos_complement
	           && mode_is_float(mode_to)) {
		/* We have a Conv long long -> float here */
		ir_node *op_low  = get_lowered_low(op);
		ir_node *op_high = get_lowered_high(op);
		ir_node *block   = get_nodes_block(node);
		ir_node *ll_to_float
			= new_bd_ia32_l_LLtoFloat(dbg, block, op_high, op_low, mode_to);

		exchange(node, ll_to_float);
	} else {
		ir_default_lower_dw_Conv(node, mode);
	}
}
Example #16
calling_convention_t *arm_decide_calling_convention(const ir_graph *irg,
                                                    ir_type *function_type)
{
	/* determine how parameters are passed */
	unsigned            stack_offset = 0;
	size_t const        n_param_regs = ARRAY_SIZE(param_regs);
	size_t const        n_params     = get_method_n_params(function_type);
	size_t              regnum       = 0;
	reg_or_stackslot_t *params       = XMALLOCNZ(reg_or_stackslot_t, n_params);

	for (size_t i = 0; i < n_params; ++i) {
		ir_type            *param_type = get_method_param_type(function_type,i);
		ir_mode            *mode       = get_type_mode(param_type);
		int                 bits       = get_mode_size_bits(mode);
		reg_or_stackslot_t *param      = &params[i];
		param->type = param_type;

		/* doubleword modes need to be passed in even registers */
		if (param_type->flags & tf_lowered_dw) {
			if (regnum < n_param_regs) {
				if ((regnum & 1) != 0)
					++regnum;
			} else {
				unsigned misalign = stack_offset % 8;
				if (misalign > 0)
					stack_offset += 8 - misalign;
			}
		}

		if (regnum < n_param_regs) {
			param->reg0 = param_regs[regnum++];
		} else {
			param->offset = stack_offset;
			/* increase offset by at least 4 bytes so everything is aligned */
			stack_offset += MAX(bits / 8, 4);
			continue;
		}

		/* we might need a 2nd 32bit component (for 64bit or double values) */
		if (bits > 32) {
			if (bits > 64)
				panic("only 32 and 64bit modes supported");

			if (regnum < n_param_regs) {
				const arch_register_t *reg = param_regs[regnum++];
				param->reg1 = reg;
			} else {
				ir_mode *pmode = param_regs[0]->cls->mode;
				ir_type *type  = get_type_for_mode(pmode);
				param->type    = type;
				param->offset  = stack_offset;
				assert(get_mode_size_bits(pmode) == 32);
				stack_offset += 4;
			}
		}
	}
	unsigned const n_param_regs_used = regnum;

	size_t const        n_result_regs       = ARRAY_SIZE(result_regs);
	size_t const        n_float_result_regs = ARRAY_SIZE(float_result_regs);
	size_t              n_results    = get_method_n_ress(function_type);
	size_t              float_regnum = 0;
	reg_or_stackslot_t *results      = XMALLOCNZ(reg_or_stackslot_t, n_results);
	regnum = 0;
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		if (mode_is_float(result_mode)) {
			if (float_regnum >= n_float_result_regs) {
				panic("too many float results");
			} else {
				const arch_register_t *reg = float_result_regs[float_regnum++];
				result->reg0 = reg;
			}
		} else {
			if (get_mode_size_bits(result_mode) > 32) {
				panic("results with more than 32bits not supported yet");
			}

			if (regnum >= n_result_regs) {
				panic("too many results");
			} else {
				const arch_register_t *reg = result_regs[regnum++];
				result->reg0 = reg;
			}
		}
	}

	calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
	cconv->parameters       = params;
	cconv->n_parameters     = n_params;
	cconv->param_stack_size = stack_offset;
	cconv->n_param_regs     = n_param_regs_used;
	cconv->results          = results;

	/* setup allocatable registers */
	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);

		assert(birg->allocatable_regs == NULL);
		birg->allocatable_regs = be_cconv_alloc_all_regs(&birg->obst, N_ARM_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs, ARRAY_SIZE(ignore_regs));
		arm_get_irg_data(irg)->omit_fp = true;
	}

	return cconv;
}
Example #17
ir_tarval *get_mode_infinite(const ir_mode *mode)
{
    assert(mode_is_float(mode));
    return mode->infinity;
}
Example #18
x86_cconv_t *amd64_decide_calling_convention(ir_type *function_type,
                                             ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
		amd64_get_irg_data(irg)->omit_fp = omit_fp;
	}

	unsigned *caller_saves = rbitset_malloc(N_AMD64_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_AMD64_REGISTERS);
	rbitset_copy(caller_saves, default_caller_saves, N_AMD64_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_AMD64_REGISTERS);

	/* determine how parameters are passed */
	size_t              n_params           = get_method_n_params(function_type);
	size_t              param_regnum       = 0;
	size_t              float_param_regnum = 0;
	reg_or_stackslot_t *params             = XMALLOCNZ(reg_or_stackslot_t,
	                                                   n_params);
	/* x64 always reserves space to spill the first 4 arguments, which makes
	 * variadic functions easier to handle. */
	unsigned stack_offset = amd64_use_x64_abi ? 32 : 0;
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *param_type = get_method_param_type(function_type,i);
		if (is_compound_type(param_type))
			panic("compound arguments NIY");

		ir_mode *mode = get_type_mode(param_type);
		int      bits = get_mode_size_bits(mode);
		reg_or_stackslot_t *param = &params[i];

		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs
		    && mode != x86_mode_E) {
			param->reg = float_param_regs[float_param_regnum++];
			if (amd64_use_x64_abi) {
				++param_regnum;
			}
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = param_regs[param_regnum++];
			if (amd64_use_x64_abi) {
				++float_param_regnum;
			}
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			/* increase offset by at least AMD64_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += round_up2(bits / 8, AMD64_REGISTER_SIZE);
		}
	}

	/* If the function is variadic, we add all unused parameter
	 * passing registers to the end of the params array, first GP,
	 * then XMM. */
	if (irg && is_method_variadic(function_type)) {
		if (amd64_use_x64_abi) {
			panic("Variadic functions on Windows ABI not supported");
		}

		int params_remaining = (n_param_regs - param_regnum) +
			(n_float_param_regs - float_param_regnum);
		params = XREALLOC(params, reg_or_stackslot_t, n_params + params_remaining);
		size_t i = n_params;

		for (; param_regnum < n_param_regs; param_regnum++, i++) {
			params[i].reg = param_regs[param_regnum];
		}

		for (; float_param_regnum < n_float_param_regs; float_param_regnum++, i++) {
			params[i].reg = float_param_regs[float_param_regnum];
		}
	}

	unsigned n_param_regs_used
		= amd64_use_x64_abi ? param_regnum : param_regnum + float_param_regnum;

	/* determine how results are passed */
	size_t              n_results           = get_method_n_ress(function_type);
	unsigned            n_reg_results       = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned            res_regnum          = 0;
	unsigned            res_float_regnum    = 0;
	unsigned            res_x87_regnum      = 0;
	size_t              n_result_regs       = ARRAY_SIZE(result_regs);
	size_t              n_float_result_regs = ARRAY_SIZE(float_result_regs);
	size_t              n_x87_result_regs   = ARRAY_SIZE(x87_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (result_mode == x86_mode_E) {
			if (res_x87_regnum >= n_x87_result_regs)
				panic("too manu x87 floating point results");
			reg = x87_result_regs[res_x87_regnum++];
		} else if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating points results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	x86_cconv_t *cconv     = XMALLOCZ(x86_cconv_t);
	cconv->parameters      = params;
	cconv->n_parameters    = n_params;
	cconv->param_stacksize = stack_offset;
	cconv->n_param_regs    = n_param_regs_used;
	cconv->n_xmm_regs      = float_param_regnum;
	cconv->results         = results;
	cconv->omit_fp         = omit_fp;
	cconv->caller_saves    = caller_saves;
	cconv->callee_saves    = callee_saves;
	cconv->n_reg_results   = n_reg_results;

	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);

		birg->allocatable_regs = be_cconv_alloc_all_regs(&birg->obst, N_AMD64_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs, ARRAY_SIZE(ignore_regs));
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_RBP);
	}

	return cconv;
}
Example #19
/**
 * construct an interval from a Confirm
 *
 * @param iv       an empty interval, will be filled
 * @param bound    the bound value
 * @param relation the Confirm compare relation
 *
 * @return the filled interval or NULL if no interval
 *         can be created (happens only for floating point values)
 */
static interval_t *get_interval(interval_t *iv, ir_node *bound, ir_relation relation)
{
	ir_mode   *mode = get_irn_mode(bound);
	ir_tarval *tv   = value_of(bound);

	if (tv == tarval_bad) {
		/* There is nothing we could do here. For integer
		 * modes we could return [-oo, +oo], but there is
	 * nothing we could deduce from such an interval.
		 * So, speed things up and return unknown.
		 */
		iv->min   = tarval_bad;
		iv->max   = tarval_bad;
		iv->flags = MIN_EXCLUDED | MAX_EXCLUDED;
		return NULL;
	}

	if (mode_is_float(mode)) {
		if (tv == get_mode_NAN(mode)) {
			/* argh, we cannot handle NaNs. */
			iv->min   = tarval_bad;
			iv->max   = tarval_bad;
			iv->flags = MIN_EXCLUDED | MAX_EXCLUDED;

			return NULL;
		}
	}

	/* check which side is known */
	switch (relation) {
	case ir_relation_equal:
		/* [tv, tv] */
		iv->min   = tv;
		iv->max   = tv;
		iv->flags = MIN_INCLUDED | MAX_INCLUDED;
		break;

	case ir_relation_less_equal:
		/* [-oo, tv] */
		iv->min   = get_mode_min(mode);
		iv->max   = tv;
		iv->flags = MIN_INCLUDED | MAX_INCLUDED;
		break;

	case ir_relation_less:
		/* [-oo, tv) */
		iv->min   = get_mode_min(mode);
		iv->max   = tv;
		iv->flags = MIN_INCLUDED | MAX_EXCLUDED;
		break;

	case ir_relation_greater:
		/* (tv, +oo] */
		iv->min   = tv;
		iv->max   = get_mode_max(mode);
		iv->flags = MIN_EXCLUDED | MAX_INCLUDED;
		break;

	case ir_relation_greater_equal:
		/* [tv, +oo] */
		iv->min   = tv;
		iv->max   = get_mode_max(mode);
		iv->flags = MIN_INCLUDED | MAX_INCLUDED;
		break;

	case ir_relation_less_equal_greater:
		/*
		 * Ordered means that neither our bound
		 * nor our value is a NaN.
		 */
		/* [-oo, +oo] */
		iv->min   = get_mode_min(mode);
		iv->max   = get_mode_max(mode);
		iv->flags = MIN_INCLUDED | MAX_INCLUDED;
		break;

	default:
		/*
		 * We do not handle UNORDERED, as a NaN
		 * could be included in the interval.
		 */
		iv->min   = tarval_bad;
		iv->max   = tarval_bad;
		iv->flags = MIN_EXCLUDED | MAX_EXCLUDED;
		return NULL;
	}

	if (iv->min != tarval_bad && iv->max != tarval_bad)
		return iv;
	return NULL;
}
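
For instance, a Confirm asserting x <= 10 over an integer mode yields [-oo, 10] with both ends included; a sketch (bound is a hypothetical Const node whose value_of() is the tarval 10):

static void interval_example(ir_node *bound)
{
	interval_t iv;
	if (get_interval(&iv, bound, ir_relation_less_equal) != NULL) {
		/* iv.min   == get_mode_min(mode)   (the mode's -oo stand-in)
		 * iv.max   == tarval 10
		 * iv.flags == MIN_INCLUDED | MAX_INCLUDED */
		(void)iv;
	}
}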
Example #20
x86_cconv_t *ia32_decide_calling_convention(ir_type *function_type,
                                            ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	(void)mtp;
	/* TODO: do something with cc_reg_param/cc_this_call */

	unsigned *caller_saves = rbitset_malloc(N_IA32_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_IA32_REGISTERS);
	rbitset_copy(caller_saves, default_caller_saves, N_IA32_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_IA32_REGISTERS);

	/* determine how parameters are passed */
	unsigned            n_params           = get_method_n_params(function_type);
	unsigned            param_regnum       = 0;
	unsigned            float_param_regnum = 0;
	reg_or_stackslot_t *params             = XMALLOCNZ(reg_or_stackslot_t,
	                                                   n_params);

	unsigned n_param_regs       = ARRAY_SIZE(default_param_regs);
	unsigned n_float_param_regs = ARRAY_SIZE(float_param_regs);
	unsigned stack_offset       = 0;
	for (unsigned i = 0; i < n_params; ++i) {
		ir_type            *param_type = get_method_param_type(function_type, i);
		reg_or_stackslot_t *param      = &params[i];
		if (is_aggregate_type(param_type)) {
			param->type   = param_type;
			param->offset = stack_offset;
			stack_offset += get_type_size_bytes(param_type);
			goto align_stack;
		}

		ir_mode *mode = get_type_mode(param_type);
		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs) {
			param->reg = float_param_regs[float_param_regnum++];
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = default_param_regs[param_regnum++];
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			stack_offset += get_type_size_bytes(param_type);
align_stack:;
			/* increase offset by at least IA32_REGISTER_SIZE bytes so
			 * everything is aligned */
			unsigned misalign = stack_offset % IA32_REGISTER_SIZE;
			if (misalign > 0)
				stack_offset += IA32_REGISTER_SIZE - misalign;
		}
	}

	unsigned n_param_regs_used = param_regnum + float_param_regnum;

	/* determine how results are passed */
	unsigned            n_results           = get_method_n_ress(function_type);
	unsigned            n_reg_results       = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned            res_regnum          = 0;
	unsigned            res_float_regnum    = 0;
	unsigned            n_result_regs       = ARRAY_SIZE(result_regs);
	unsigned            n_float_result_regs = ARRAY_SIZE(float_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating points results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	calling_convention cc = get_method_calling_convention(function_type);

	x86_cconv_t *cconv    = XMALLOCZ(x86_cconv_t);
	cconv->sp_delta       = (cc & cc_compound_ret) && !(cc & cc_reg_param)
	                        ? IA32_REGISTER_SIZE : 0;
	cconv->parameters     = params;
	cconv->n_parameters   = n_params;
	cconv->callframe_size = stack_offset;
	cconv->n_param_regs   = n_param_regs_used;
	cconv->n_xmm_regs     = float_param_regnum;
	cconv->results        = results;
	cconv->omit_fp        = omit_fp;
	cconv->caller_saves   = caller_saves;
	cconv->callee_saves   = callee_saves;
	cconv->n_reg_results  = n_reg_results;

	if (irg != NULL) {
		be_irg_t       *birg      = be_birg_from_irg(irg);
		size_t          n_ignores = ARRAY_SIZE(ignore_regs);
		struct obstack *obst      = &birg->obst;

		birg->allocatable_regs = rbitset_obstack_alloc(obst, N_IA32_REGISTERS);
		rbitset_set_all(birg->allocatable_regs, N_IA32_REGISTERS);
		for (size_t r = 0; r < n_ignores; ++r) {
			rbitset_clear(birg->allocatable_regs, ignore_regs[r]);
		}
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_EBP);
	}

	return cconv;
}
Example #21
/**
 * Transforms a Cmp into the appropriate soft float function.
 */
static bool lower_Cmp(ir_node *const n)
{
	ir_node *const left    = get_Cmp_left(n);
	ir_mode *const op_mode = get_irn_mode(left);
	if (!mode_is_float(op_mode))
		return false;

	dbg_info *const dbgi = get_irn_dbg_info(n);
	ir_graph *const irg  = get_irn_irg(n);
	ir_node  *const zero = new_rd_Const_null(dbgi, irg, mode_Is);

	char const  *name     = NULL;
	char const  *name2    = NULL;
	ir_node     *result   = NULL;
	ir_relation  relation = get_Cmp_relation(n);
	switch (relation) {
	case ir_relation_false:
		result = zero;
		break;
	case ir_relation_equal:
		name = "eq";
		break;
	case ir_relation_less:
		name = "lt";
		break;
	case ir_relation_greater:
		name = "gt";
		break;
	case ir_relation_unordered:
		name     = "unord";
		relation = ir_relation_less_greater;
		break;
	case ir_relation_less_equal:
		name = "le";
		break;
	case ir_relation_greater_equal:
		name = "ge";
		break;
	case ir_relation_less_greater:
		name  = "unord";
		name2 = "ne";
		break;
	case ir_relation_less_equal_greater:
		name     = "unord";
		relation = ir_relation_equal;
		break;
	case ir_relation_unordered_equal:
		name     = "unord";
		relation = ir_relation_less_greater;
		name2    = "ne";
		break;
	case ir_relation_unordered_less:
		name     = "ge";
		relation = ir_relation_less;
		break;
	case ir_relation_unordered_less_equal:
		name     = "gt";
		relation = ir_relation_less_equal;
		break;
	case ir_relation_unordered_greater:
		name     = "le";
		relation = ir_relation_greater;
		break;
	case ir_relation_unordered_greater_equal:
		name     = "lt";
		relation = ir_relation_greater_equal;
		break;
	case ir_relation_unordered_less_greater:
		name     = "eq";
		relation = ir_relation_less_greater;
		break;
	case ir_relation_true:
		result = zero;
		break;
	}

	ir_node *const block = get_nodes_block(n);
	ir_node *const right = get_Cmp_right(n);

	if (result == NULL) {
		ir_node *const in[] = { left, right };
		result = make_softfloat_call(n, name, ARRAY_SIZE(in), in);
	}

	ir_node *cmp = new_r_Cmp(block, result, zero, relation);

	/* We need two calls into the softfloat library */
	if (name2 != NULL) {
		ir_node *const in[] = { left, right };
		result   = make_softfloat_call(n, name2, ARRAY_SIZE(in), in);
		relation = get_Cmp_relation(n);

		ir_node *const mux = new_rd_Mux(dbgi, block, cmp, result, zero);

		arch_allow_ifconv_func const allow_ifconv
			= be_get_backend_param()->allow_ifconv;
		if (!allow_ifconv(cmp, result, zero))
			ir_nodeset_insert(&created_mux_nodes, mux);

		cmp = new_r_Cmp(block, mux, zero, relation);
	}

	exchange(n, cmp);
	return true;
}
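
One of the unordered mappings worked through, assuming the usual libgcc soft-float comparison semantics (__gesf2() returns a negative value iff a < b or either operand is NaN):

/* ir_relation_unordered_less becomes name = "ge", relation = less:
 *
 *   a <u b  (less or unordered)
 *   <=>  !(a >= b)
 *   <=>  ge(a, b) < 0
 *
 * so the lowered code is Cmp(ge_call(a, b), 0, ir_relation_less). */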
Example #22
calling_convention_t *sparc_decide_calling_convention(ir_type *function_type,
                                                      ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		/* our current va_arg handling needs the standard spill space for
		 * arguments 0-5 */
		if (is_method_variadic(function_type))
			omit_fp = false;
		/* The pointer to the aggregate return value belongs to the 92 magic
		 * bytes. Thus, if the called function increases the stack size, it
		 * must copy the value to the appropriate location.
		 * This is not implemented yet, so we must not omit the frame pointer.
		 */
		if (get_method_calling_convention(function_type) & cc_compound_ret)
			omit_fp = false;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
		sparc_get_irg_data(irg)->omit_fp = omit_fp;
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	unsigned *caller_saves = rbitset_malloc(N_SPARC_REGISTERS);
	if (mtp & mtp_property_returns_twice) {
		rbitset_copy(caller_saves, default_returns_twice_saves,
		             N_SPARC_REGISTERS);
	} else {
		rbitset_copy(caller_saves, default_caller_saves, N_SPARC_REGISTERS);
	}

	/* determine how parameters are passed */
	int                 n_params = get_method_n_params(function_type);
	int                 regnum   = 0;
	reg_or_stackslot_t *params   = XMALLOCNZ(reg_or_stackslot_t, n_params);

	int      n_param_regs = ARRAY_SIZE(param_regs);
	unsigned stack_offset = !omit_fp ? SPARC_MIN_STACKSIZE : 0;
	for (int i = 0; i < n_params; ++i) {
		ir_type            *param_type = get_method_param_type(function_type,i);
		ir_mode            *mode;
		int                 bits;
		reg_or_stackslot_t *param;

		if (is_compound_type(param_type))
			panic("compound arguments not supported yet");

		mode  = get_type_mode(param_type);
		bits  = get_mode_size_bits(mode);
		param = &params[i];

		if (i == 0 &&
		    (get_method_calling_convention(function_type) & cc_compound_ret)) {
			assert(mode_is_reference(mode) && bits == 32);
			/* special case, we have reserved space for this in the between
			 * type */
			param->type   = param_type;
			param->offset = SPARC_AGGREGATE_RETURN_OFFSET;
			param->already_stored = true;
			continue;
		}

		if (regnum < n_param_regs) {
			param->offset = SPARC_PARAMS_SPILL_OFFSET
			                + regnum * SPARC_REGISTER_SIZE;
			param->type   = param_type;
			arch_register_t const *reg = param_regs[regnum++];
			if (irg == NULL || omit_fp)
				reg = map_i_to_o_reg(reg);
			param->reg0 = reg;
			param->req0 = reg->single_req;
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			param->already_stored = true;
			/* increase offset by at least SPARC_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += MAX(bits / 8, SPARC_REGISTER_SIZE);
			continue;
		}

		/* we might need a 2nd 32bit component (for 64bit or double values) */
		if (bits > 32) {
			if (bits > 64)
				panic("only 32 and 64bit modes supported");

			if (regnum < n_param_regs) {
				param->offset = SPARC_PARAMS_SPILL_OFFSET
				                + regnum * SPARC_REGISTER_SIZE;
				arch_register_t const *reg = param_regs[regnum++];
				if (irg == NULL || omit_fp)
					reg = map_i_to_o_reg(reg);
				param->reg1 = reg;
				param->req1 = reg->single_req;
			} else {
				ir_mode *regmode = param_regs[0]->cls->mode;
				ir_type *type    = get_type_for_mode(regmode);
				param->type      = type;
				param->offset    = stack_offset;
				assert(get_mode_size_bits(regmode) == 32);
				stack_offset += SPARC_REGISTER_SIZE;
			}
		}
	}
	unsigned n_param_regs_used = regnum;

	/* determine how results are passed */
	int                 n_results           = get_method_n_ress(function_type);
	unsigned            float_regnum        = 0;
	unsigned            n_reg_results       = 0;
	unsigned            n_float_result_regs = ARRAY_SIZE(float_result_regs);
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	regnum        = 0;
	for (int i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		if (mode_is_float(result_mode)) {
			unsigned n_regs   = determine_n_float_regs(result_mode);
			unsigned next_reg = round_up2(float_regnum, n_regs);

			if (next_reg >= n_float_result_regs) {
				panic("too many float results");
			} else {
				const arch_register_t *reg = float_result_regs[next_reg];
				rbitset_clear(caller_saves, reg->global_index);
				if (n_regs == 1) {
					result->req0 = reg->single_req;
				} else if (n_regs == 2) {
					result->req0 = &float_result_reqs_double[next_reg];
					rbitset_clear(caller_saves, reg->global_index+1);
				} else if (n_regs == 4) {
					result->req0 = &float_result_reqs_quad[next_reg];
					rbitset_clear(caller_saves, reg->global_index+1);
					rbitset_clear(caller_saves, reg->global_index+2);
					rbitset_clear(caller_saves, reg->global_index+3);
				} else {
					panic("invalid number of registers in result");
				}
				float_regnum = next_reg + n_regs;

				++n_reg_results;
			}
		} else {
			if (get_mode_size_bits(result_mode) > 32) {
				panic("results with more than 32bits not supported yet");
			}

			if (regnum >= n_param_regs) {
				panic("too many results");
			} else {
				const arch_register_t *reg = param_regs[regnum++];
				if (irg == NULL || omit_fp)
					reg = map_i_to_o_reg(reg);
				result->req0 = reg->single_req;
				rbitset_clear(caller_saves, reg->global_index);
				++n_reg_results;
			}
		}
	}

	calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
	cconv->n_parameters     = n_params;
	cconv->parameters       = params;
	cconv->param_stack_size = stack_offset - SPARC_MIN_STACKSIZE;
	cconv->n_param_regs     = n_param_regs_used;
	cconv->results          = results;
	cconv->omit_fp          = omit_fp;
	cconv->caller_saves     = caller_saves;
	cconv->n_reg_results    = n_reg_results;

	/* setup ignore register array */
	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);

		birg->allocatable_regs = be_cconv_alloc_all_regs(&birg->obst, N_SPARC_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs, ARRAY_SIZE(ignore_regs));
	}

	return cconv;
}
Example #23
x86_cconv_t *amd64_decide_calling_convention(ir_type *function_type,
                                             ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	unsigned *caller_saves = rbitset_malloc(N_AMD64_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_AMD64_REGISTERS);
	if (mtp & mtp_property_returns_twice)
		panic("amd64: returns_twice calling convention NIY");
	rbitset_copy(caller_saves, default_caller_saves, N_AMD64_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_AMD64_REGISTERS);

	/* determine how parameters are passed */
	size_t              n_params           = get_method_n_params(function_type);
	size_t              param_regnum       = 0;
	size_t              float_param_regnum = 0;
	reg_or_stackslot_t *params             = XMALLOCNZ(reg_or_stackslot_t,
	                                                   n_params);
	/* x64 always reserves space to spill the first 4 arguments, which makes
	 * variadic functions easier to handle. */
	unsigned stack_offset = amd64_use_x64_abi ? 32 : 0;
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *param_type = get_method_param_type(function_type,i);
		if (is_compound_type(param_type))
			panic("amd64: compound arguments NIY");

		ir_mode *mode = get_type_mode(param_type);
		int      bits = get_mode_size_bits(mode);
		reg_or_stackslot_t *param = &params[i];

		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs) {
			param->reg = float_param_regs[float_param_regnum++];
			if (amd64_use_x64_abi)
				++param_regnum;
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = param_regs[param_regnum++];
			if (amd64_use_x64_abi)
				++float_param_regnum;
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			/* increase offset by at least AMD64_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += MAX(bits / 8, AMD64_REGISTER_SIZE);
			continue;
		}
	}

	unsigned n_param_regs_used
		= amd64_use_x64_abi ? param_regnum : param_regnum + float_param_regnum;

	/* determine how results are passed */
	size_t              n_results           = get_method_n_ress(function_type);
	unsigned            n_reg_results       = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned            res_regnum          = 0;
	unsigned            res_float_regnum    = 0;
	size_t              n_result_regs       = ARRAY_SIZE(result_regs);
	size_t              n_float_result_regs = ARRAY_SIZE(float_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating points results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	x86_cconv_t *cconv    = XMALLOCZ(x86_cconv_t);
	cconv->parameters     = params;
	cconv->callframe_size = stack_offset;
	cconv->n_param_regs   = n_param_regs_used;
	cconv->n_xmm_regs     = float_param_regnum;
	cconv->results        = results;
	cconv->omit_fp        = omit_fp;
	cconv->caller_saves   = caller_saves;
	cconv->callee_saves   = callee_saves;
	cconv->n_reg_results  = n_reg_results;

	if (irg != NULL) {
		be_irg_t       *birg      = be_birg_from_irg(irg);
		size_t          n_ignores = ARRAY_SIZE(ignore_regs);
		struct obstack *obst      = &birg->obst;

		birg->allocatable_regs = rbitset_obstack_alloc(obst, N_AMD64_REGISTERS);
		rbitset_set_all(birg->allocatable_regs, N_AMD64_REGISTERS);
		for (size_t r = 0; r < n_ignores; ++r) {
			rbitset_clear(birg->allocatable_regs, ignore_regs[r]);
		}
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_RBP);
	}

	return cconv;
}