Example #1
/** Patches Address nodes to work in position-independent code. */
static void fix_pic_addresses(ir_node *const node, void *const data)
{
	(void)data;

	ir_graph      *const irg = get_irn_irg(node);
	be_main_env_t *const be  = be_get_irg_main_env(irg);
	foreach_irn_in(node, i, pred) {
		if (!is_Address(pred))
			continue;

		ir_node         *res;
		ir_entity *const entity = get_Address_entity(pred);
		dbg_info  *const dbgi   = get_irn_dbg_info(pred);
		if (i == n_Call_ptr && is_Call(node)) {
			/* Calls can jump to relative addresses, so we can directly jump to
			 * the (relatively) known call address or the trampoline */
			if (can_address_relative(entity))
				continue;

			ir_entity *const trampoline = get_trampoline(be, entity);
			res = new_rd_Address(dbgi, irg, trampoline);
		} else if (get_entity_type(entity) == get_code_type()) {
			/* Block labels can always be addressed directly. */
			continue;
		} else {
			/* Everything else is accessed relative to EIP. */
			ir_node *const block    = get_nodes_block(pred);
			ir_mode *const mode     = get_irn_mode(pred);
			ir_node *const pic_base = ia32_get_pic_base(irg);

			if (can_address_relative(entity)) {
				/* All ok now for locally constructed stuff. */
				res = new_rd_Add(dbgi, block, pic_base, pred, mode);
				/* Make sure the walker doesn't visit this add again. */
				mark_irn_visited(res);
			} else {
				/* Get entry from pic symbol segment. */
				ir_entity *const pic_symbol  = get_pic_symbol(be, entity);
				ir_node   *const pic_address = new_rd_Address(dbgi, irg, pic_symbol);
				ir_node   *const add         = new_rd_Add(dbgi, block, pic_base, pic_address, mode);
				mark_irn_visited(add);

				/* We need an extra indirection for global data outside our current
				 * module. The loads are always safe and can therefore float and
				 * need no memory input */
				ir_type *const type  = get_entity_type(entity);
				ir_node *const nomem = get_irg_no_mem(irg);
				ir_node *const load  = new_rd_Load(dbgi, block, nomem, add, mode, type, cons_floats);
				res = new_r_Proj(load, mode, pn_Load_res);
			}
		}
		set_irn_n(node, i, res);
	}
}
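
For orientation: stripped of the IR construction, the two access forms built above amount to the following address arithmetic. A minimal sketch; pic_local, pic_global and the offset parameters are illustrative names, not libFirm API.

#include <stdint.h>

/* Locally resolvable entity: plain EIP-relative addition (the Add on
 * pic_base above). Illustrative names, not libFirm API. */
static inline uintptr_t pic_local(uintptr_t pic_base, uintptr_t offset)
{
	return pic_base + offset;
}

/* Entity from another module: one extra load through the PIC symbol
 * segment (the floating Load above). */
static inline uintptr_t pic_global(uintptr_t pic_base, uintptr_t got_offset)
{
	return *(uintptr_t *)(pic_base + got_offset);
}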
Example #2
/**
 * lower 64bit addition: a 32bit add for the lower parts, an add with
 * carry for the higher parts. If the carry's value is known, fold it
 * into the upper add.
 */
static void ia32_lower_add64(ir_node *node, ir_mode *mode)
{
	dbg_info     *dbg        = get_irn_dbg_info(node);
	ir_node      *block      = get_nodes_block(node);
	ir_node      *left       = get_Add_left(node);
	ir_node      *right      = get_Add_right(node);
	ir_node      *left_low   = get_lowered_low(left);
	ir_node      *left_high  = get_lowered_high(left);
	ir_node      *right_low  = get_lowered_low(right);
	ir_node      *right_high = get_lowered_high(right);
	ir_mode      *low_mode   = get_irn_mode(left_low);
	ir_mode      *high_mode  = get_irn_mode(left_high);
	carry_result  cr         = lower_add_carry(left, right, low_mode);

	assert(get_irn_mode(left_low)  == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (cr == no_carry) {
		ir_node *add_low  = new_rd_Add(dbg, block, left_low,  right_low, low_mode);
		ir_node *add_high = new_rd_Add(dbg, block, left_high, right_high, high_mode);
		ir_set_dw_lowered(node, add_low, add_high);
	} else if (cr == must_carry && (is_Const(left_high) || is_Const(right_high))) {
		// We cannot assume that left_high and right_high form a normalized Add.
		ir_node *constant;
		ir_node *other;

		if (is_Const(left_high)) {
			constant = left_high;
			other    = right_high;
		} else {
			constant = right_high;
			other    = left_high;
		}

		ir_graph *irg            = get_irn_irg(right_high);
		ir_node  *one            = new_rd_Const(dbg, irg, get_mode_one(high_mode));
		ir_node  *const_plus_one = new_rd_Add(dbg, block, constant, one, high_mode);
		ir_node  *add_high       = new_rd_Add(dbg, block, other, const_plus_one, high_mode);
		ir_node  *add_low        = new_rd_Add(dbg, block, left_low, right_low, low_mode);
		ir_set_dw_lowered(node, add_low, add_high);
	} else {
		/* l_res = a_l + b_l */
		ir_node  *add_low    = new_bd_ia32_l_Add(dbg, block, left_low, right_low);
		ir_mode  *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node  *res_low    = new_r_Proj(add_low, ia32_mode_gp, pn_ia32_l_Add_res);
		ir_node  *flags      = new_r_Proj(add_low, mode_flags, pn_ia32_l_Add_flags);

		/* h_res = a_h + b_h + carry */
		ir_node  *add_high
			= new_bd_ia32_l_Adc(dbg, block, left_high, right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, add_high);
	}
}
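
As plain C, the generic l_Add/l_Adc branch computes the following; the must_carry branch merely folds the known carry into the constant operand beforehand. A minimal sketch with illustrative names:

#include <stdint.h>

/* What the l_Add/l_Adc pair computes (sketch). */
static void add64(uint32_t a_l, uint32_t a_h, uint32_t b_l, uint32_t b_h,
                  uint32_t *res_l, uint32_t *res_h)
{
	uint32_t l     = a_l + b_l;
	uint32_t carry = l < a_l;    /* carry flag of the low add */
	*res_l = l;
	*res_h = a_h + b_h + carry;  /* adc */
}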
Example #3
/**
 * lower 64bit Mul operation.
 */
static void ia32_lower_mul64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbg        = get_irn_dbg_info(node);
	ir_node  *block      = get_nodes_block(node);
	ir_node  *left       = get_Mul_left(node);
	ir_node  *right      = get_Mul_right(node);
	ir_node  *left_low   = get_lowered_low(left);
	ir_node  *left_high  = get_lowered_high(left);
	ir_node  *right_low  = get_lowered_low(right);
	ir_node  *right_high = get_lowered_high(right);

	/*
		EDX:EAX = left_low * right_low
		l_res   = EAX

		t1 = right_low * left_high
		t2 = t1 + EDX
		t3 = left_low * right_high
		h_res = t2 + t3
	*/

	/* handle the often-used case of a 32x32=64 bit mul */
	ir_node *h_res;
	ir_node *l_res;
	if (is_sign_extend(left_low, left_high)
	    && is_sign_extend(right_low, right_high)) {
		ir_node *mul = new_bd_ia32_l_IMul(dbg, block, left_low, right_low);
		h_res = new_rd_Proj(dbg, mul, mode, pn_ia32_l_IMul_res_high);
		l_res = new_rd_Proj(dbg, mul, ia32_mode_gp, pn_ia32_l_IMul_res_low);
	} else {
		/* note that zero extension is handled here efficiently */
		ir_node *mul  = new_bd_ia32_l_Mul(dbg, block, left_low, right_low);
		ir_node *pEDX = new_rd_Proj(dbg, mul, mode, pn_ia32_l_Mul_res_high);
		l_res = new_rd_Proj(dbg, mul, ia32_mode_gp, pn_ia32_l_Mul_res_low);

		ir_node *right_lowc = new_rd_Conv(dbg, block, right_low, mode);
		ir_node *mul1       = new_rd_Mul(dbg, block, left_high, right_lowc, mode);
		ir_node *add        = new_rd_Add(dbg, block, mul1, pEDX, mode);
		ir_node *left_lowc  = new_rd_Conv(dbg, block, left_low, mode);
		ir_node *mul2       = new_rd_Mul(dbg, block, left_lowc, right_high, mode);
		h_res = new_rd_Add(dbg, block, add, mul2, mode);
	}
	ir_set_dw_lowered(node, l_res, h_res);
}
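
The comment block above is the usual 32x32 decomposition of a 64bit multiply; the same identity underlies the ARM lowering in Example #5. A minimal C sketch:

#include <stdint.h>

/* The decomposition from the comment above (sketch). */
static void mul64(uint32_t a_l, uint32_t a_h, uint32_t b_l, uint32_t b_h,
                  uint32_t *res_l, uint32_t *res_h)
{
	uint64_t t = (uint64_t)a_l * b_l; /* EDX:EAX = left_low * right_low */
	*res_l = (uint32_t)t;             /* l_res = EAX */
	*res_h = (uint32_t)(t >> 32)      /* EDX */
	       + a_h * b_l                /* t1 = right_low * left_high */
	       + a_l * b_h;               /* t3 = left_low * right_high */
}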
Example #4
/**
 * lower 64bit subtraction: a 32bit sub for the lower parts, a sub
 * with borrow for the higher parts. If the borrow's value is known,
 * fold it into the upper sub.
 */
static void ia32_lower_sub64(ir_node *node, ir_mode *mode)
{
	dbg_info     *dbg        = get_irn_dbg_info(node);
	ir_node      *block      = get_nodes_block(node);
	ir_node      *left       = get_Sub_left(node);
	ir_node      *right      = get_Sub_right(node);
	ir_node      *left_low   = get_lowered_low(left);
	ir_node      *left_high  = get_lowered_high(left);
	ir_node      *right_low  = get_lowered_low(right);
	ir_node      *right_high = get_lowered_high(right);
	ir_mode      *low_mode   = get_irn_mode(left_low);
	ir_mode      *high_mode  = get_irn_mode(left_high);
	carry_result  cr         = lower_sub_borrow(left, right, low_mode);

	assert(get_irn_mode(left_low)  == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (cr == no_carry) {
		ir_node *sub_low  = new_rd_Sub(dbg, block, left_low,  right_low, low_mode);
		ir_node *sub_high = new_rd_Sub(dbg, block, left_high, right_high, high_mode);
		ir_set_dw_lowered(node, sub_low, sub_high);
	} else if (cr == must_carry && (is_Const(left_high) || is_Const(right_high))) {
		ir_node  *sub_high;
		ir_graph *irg        = get_irn_irg(right_high);
		ir_node  *one        = new_rd_Const(dbg, irg, get_mode_one(high_mode));

		if (is_Const(right_high)) {
			ir_node *new_const = new_rd_Add(dbg, block, right_high, one, high_mode);
			sub_high = new_rd_Sub(dbg, block, left_high, new_const, high_mode);
		} else if (is_Const(left_high)) {
			ir_node *new_const = new_rd_Sub(dbg, block, left_high, one, high_mode);
			sub_high = new_rd_Sub(dbg, block, new_const, right_high, high_mode);
		} else {
			panic("logic error");
		}

		ir_node  *sub_low  = new_rd_Sub(dbg, block, left_low, right_low, low_mode);
		ir_set_dw_lowered(node, sub_low, sub_high);
	} else {
		/* l_res = a_l - b_l */
		ir_node  *sub_low    = new_bd_ia32_l_Sub(dbg, block, left_low, right_low);
		ir_mode  *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node  *res_low    = new_r_Proj(sub_low, ia32_mode_gp, pn_ia32_l_Sub_res);
		ir_node  *flags      = new_r_Proj(sub_low, mode_flags, pn_ia32_l_Sub_flags);

		/* h_res = a_h - b_h - carry */
		ir_node  *sub_high
			= new_bd_ia32_l_Sbb(dbg, block, left_high, right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, sub_high);
	}
}
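
The generic branch mirrors the add lowering, with a borrow in place of the carry; in plain C (a minimal sketch):

#include <stdint.h>

/* What the l_Sub/l_Sbb pair computes (sketch). */
static void sub64(uint32_t a_l, uint32_t a_h, uint32_t b_l, uint32_t b_h,
                  uint32_t *res_l, uint32_t *res_h)
{
	uint32_t borrow = a_l < b_l;  /* borrow flag of the low sub */
	*res_l = a_l - b_l;
	*res_h = a_h - b_h - borrow;  /* sbb */
}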
Example #5
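/**
 * Lower a 64bit Mul on arm: an UMULL for the low words plus two
 * truncated 32bit multiplies folded into the high word (the identity
 * sketched after Example #3).
 */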
static void lower64_mul(ir_node *node, ir_mode *mode)
{
	dbg_info *dbgi       = get_irn_dbg_info(node);
	ir_node  *block      = get_nodes_block(node);
	ir_node  *left       = get_Mul_left(node);
	ir_node  *right      = get_Mul_right(node);
	ir_node  *left_low   = get_lowered_low(left);
	ir_node  *left_high  = get_lowered_high(left);
	ir_node  *right_low  = get_lowered_low(right);
	ir_node  *right_high = get_lowered_high(right);
	ir_node  *conv_l_low = new_rd_Conv(dbgi, block, left_low, mode);
	ir_node  *mul1       = new_rd_Mul(dbgi, block, conv_l_low, right_high,
	                                  mode);
	ir_node  *umull      = new_bd_arm_UMulL_t(dbgi, block, left_low, right_low);
	ir_mode  *umode      = get_irn_mode(right_low);
	ir_node  *umull_low  = new_r_Proj(umull, umode, pn_arm_UMulL_t_low);
	ir_node  *umull_high = new_r_Proj(umull, mode, pn_arm_UMulL_t_high);
	ir_node  *conv_r_low = new_rd_Conv(dbgi, block, right_low, mode);
	ir_node  *mul2       = new_rd_Mul(dbgi, block, conv_r_low, left_high, mode);
	ir_node  *add1       = new_rd_Add(dbgi, block, mul2, mul1, mode);
	ir_node  *add2       = new_rd_Add(dbgi, block, add1, umull_high, mode);
	ir_set_dw_lowered(node, umull_low, add2);
}
Example #6
/**
 * Adjust the size of a node representing a stack alloc to a certain
 * stack_alignment.
 *
 * @param dbgi       debug info to attach to the new nodes
 * @param size       the node containing the non-aligned size
 * @param block      the block in which the new nodes are allocated
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(dbg_info *dbgi, ir_node *size, ir_node *block)
{
	/* Example: po2_alignment 4 (align to 16 bytes):
	 *   size = (size+15) & 0xfff...f8 */
	ir_mode   *mode    = get_irn_mode(size);
	ir_graph  *irg     = get_irn_irg(block);
	ir_tarval *allone  = get_mode_all_one(mode);
	ir_tarval *shr     = tarval_shr_unsigned(allone, po2_stack_alignment);
	ir_tarval *mask    = tarval_shl_unsigned(shr, po2_stack_alignment);
	ir_tarval *invmask = tarval_not(mask);
	ir_node   *addv    = new_r_Const(irg, invmask);
	ir_node   *add     = new_rd_Add(dbgi, block, size, addv);
	ir_node   *maskc   = new_r_Const(irg, mask);
	ir_node   *and     = new_rd_And(dbgi, block, add, maskc);
	return and;
}
Example #7
/**
 * Adjust the size of a node representing a stack alloc to a certain
 * stack_alignment.
 *
 * @param dbgi       debug info to attach to the new nodes
 * @param size       the node containing the non-aligned size
 * @param block      the block in which the new nodes are allocated
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(dbg_info *dbgi, ir_node *size, ir_node *block)
{
	if (stack_alignment <= 1)
		return size;
	if (is_Const(size) && !lower_constant_sizes)
		return size;

	ir_mode   *mode = get_irn_mode(size);
	ir_tarval *tv   = new_tarval_from_long(stack_alignment-1, mode);
	ir_graph  *irg  = get_Block_irg(block);
	ir_node   *mask = new_r_Const(irg, tv);
	size = new_rd_Add(dbgi, block, size, mask, mode);
	tv   = new_tarval_from_long(-(long)stack_alignment, mode);
	mask = new_r_Const(irg, tv);
	size = new_rd_And(dbgi, block, size, mask, mode);
	return size;
}
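
Example #6 derives the mask by shifting an all-ones tarval, Example #7 from stack_alignment-1 and -stack_alignment; for power-of-two alignments both are the same round-up. A minimal C sketch, assuming align is a power of two:

#include <stdint.h>

/* Round size up to a power-of-two alignment (sketch). ~(align - 1) is
 * Example #7's -align and Example #6's (~0 >> po2) << po2; align - 1 is
 * Example #6's invmask. */
static uint32_t round_up_po2(uint32_t size, uint32_t align)
{
	return (size + (align - 1)) & ~(align - 1);
}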
Example #8
void be_default_lower_va_arg(ir_node *const node, bool const compound_is_ptr,
                             unsigned const stack_param_align)
{
	ir_node  *block = get_nodes_block(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_graph *irg   = get_irn_irg(node);

	ir_type       *aptype   = get_method_res_type(get_Builtin_type(node), 0);
	ir_node *const ap       = get_irn_n(node, 1);
	ir_node *const node_mem = get_Builtin_mem(node);

	ir_mode *apmode = get_type_mode(aptype);
	ir_node *res;
	ir_node *new_mem;
	if (apmode) {
		goto load;
	} else if (compound_is_ptr) {
		apmode = mode_P;
		aptype = get_type_for_mode(apmode);
load:;
		ir_node *const load = new_rd_Load(dbgi, block, node_mem, ap, apmode, aptype, cons_none);
		res     = new_r_Proj(load, apmode, pn_Load_res);
		new_mem = new_r_Proj(load, mode_M, pn_Load_M);
	} else {
		/* aptype has no associated mode, so it is represented as a pointer. */
		res     = ap;
		new_mem = node_mem;
	}

	unsigned const round_up    = round_up2(get_type_size(aptype),
	                                       stack_param_align);
	ir_mode *const offset_mode = get_reference_offset_mode(mode_P);
	ir_node *const offset      = new_r_Const_long(irg, offset_mode, round_up);
	ir_node *const new_ap      = new_rd_Add(dbgi, block, ap, offset);

	ir_node *const in[] = { new_mem, res, new_ap };
	turn_into_tuple(node, ARRAY_SIZE(in), in);
}
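
Stripped of the IR plumbing, the lowering implements the classic va_arg pointer bump: read the argument at ap, then advance ap by the argument size rounded up to the stack slot alignment. A hedged C analogue; va_arg_step and round_up are illustrative names (round_up mirrors round_up2):

#include <stddef.h>

/* Illustrative helper mirroring round_up2 (assumes power-of-two align). */
static size_t round_up(size_t x, size_t align)
{
	return (x + align - 1) & ~(align - 1);
}

/* C analogue of the lowering above (sketch). */
static void *va_arg_step(char **ap, size_t arg_size, size_t stack_param_align)
{
	void *res = *ap;                              /* the Load's result */
	*ap += round_up(arg_size, stack_param_align); /* new_ap = ap + offset */
	return res;
}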
Example #9
static void transform_Proj_Alloc(ir_node *node)
{
	/* we might need a result adjustment */
	if (addr_delta == 0)
		return;
	if (get_Proj_proj(node) != pn_Alloc_res)
		return;
	if (ir_nodeset_contains(&transformed, node))
		return;

	ir_node  *const alloc = get_Proj_pred(node);
	dbg_info *const dbgi  = get_irn_dbg_info(alloc);
	ir_graph *const irg   = get_irn_irg(node);
	ir_node  *const block = get_nodes_block(node);
	ir_node  *const delta = new_r_Const_long(irg, mode_P, addr_delta);
	ir_node  *const dummy = new_r_Dummy(irg, mode_P);
	ir_node  *const add   = new_rd_Add(dbgi, block, dummy, delta, mode_P);

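	/* Exchange first, then patch in the real Proj: creating the replacement
	 * Proj before the exchange would presumably be unified (CSE) with the
	 * Proj being replaced, hence the Dummy placeholder. (This rationale is
	 * inferred, not stated in the source.) */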
	exchange(node, add);
	ir_node *const new_proj = new_r_Proj(alloc, mode_P, pn_Alloc_res);
	set_Add_left(add, new_proj);
	ir_nodeset_insert(&transformed, new_proj);
}
Example #10
/**
 * lower 64bit conversions
 */
static void ia32_lower_conv64(ir_node *node, ir_mode *mode)
{
	dbg_info  *dbg       = get_irn_dbg_info(node);
	ir_node   *op        = get_Conv_op(node);
	ir_mode   *mode_from = get_irn_mode(op);
	ir_mode   *mode_to   = get_irn_mode(node);

	if (mode_is_float(mode_from) && get_mode_size_bits(mode_to) == 64
	    && get_mode_arithmetic(mode_to) == irma_twos_complement) {
		/* We have a Conv float -> long long here */
		ir_node *float_to_ll;
		ir_node *l_res;
		ir_node *h_res;
		if (mode_is_signed(mode)) {
			/* convert from float to signed 64bit */
			ir_node *block = get_nodes_block(node);
			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, block, op);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
			                   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
							   pn_ia32_l_FloattoLL_res_high);
		} else {
			/* Convert from float to unsigned 64bit. */
			ir_graph  *irg = get_irn_irg(node);
			ir_tarval *flt_tv
				= new_tarval_from_str("9223372036854775808", 19, x86_mode_E);
			ir_node   *flt_corr  = new_r_Const(irg, flt_tv);

			ir_node *lower_blk = part_block_dw(node);
			ir_node *upper_blk = get_nodes_block(node);
			set_dw_control_flow_changed();

			ir_node *opc  = new_rd_Conv(dbg, upper_blk, op, x86_mode_E);
			ir_node *cmp  = new_rd_Cmp(dbg, upper_blk, opc, flt_corr,
			                           ir_relation_less);
			ir_node *cond = new_rd_Cond(dbg, upper_blk, cmp);
			ir_node *in[] = {
				new_r_Proj(cond, mode_X, pn_Cond_true),
				new_r_Proj(cond, mode_X, pn_Cond_false)
			};
			ir_node *blk   = new_r_Block(irg, 1, &in[1]);
			in[1] = new_r_Jmp(blk);

			set_irn_in(lower_blk, 2, in);

			/* create the two Phis */
			ir_node *phi_in[] = {
				new_r_Const_null(irg, mode),
				new_r_Const_long(irg, mode, 0x80000000)
			};
			ir_node *int_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(phi_in), phi_in, mode);

			ir_node *fphi_in[] = {
				opc,
				new_rd_Sub(dbg, upper_blk, opc, flt_corr, x86_mode_E)
			};
			ir_node *flt_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(fphi_in), fphi_in,
				            x86_mode_E);

			/* fix Phi links for next part_block() */
			if (is_Phi(int_phi))
				add_Block_phi(lower_blk, int_phi);
			if (is_Phi(flt_phi))
				add_Block_phi(lower_blk, flt_phi);

			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, lower_blk, flt_phi);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
							   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
							   pn_ia32_l_FloattoLL_res_high);
			h_res = new_rd_Add(dbg, lower_blk, h_res, int_phi, mode);

			/* move the Conv and its Projs to the lower block */
			set_nodes_block(node, lower_blk);
			for (ir_node *proj = (ir_node*)get_irn_link(node); proj != NULL;
			     proj = (ir_node*)get_irn_link(proj)) {
				set_nodes_block(proj, lower_blk);
			}
		}
		ir_set_dw_lowered(node, l_res, h_res);
	} else if (get_mode_size_bits(mode_from) == 64
	           && get_mode_arithmetic(mode_from) == irma_twos_complement
	           && mode_is_float(mode_to)) {
		/* We have a Conv long long -> float here */
		ir_node *op_low  = get_lowered_low(op);
		ir_node *op_high = get_lowered_high(op);
		ir_node *block   = get_nodes_block(node);
		ir_node *ll_to_float
			= new_bd_ia32_l_LLtoFloat(dbg, block, op_high, op_low, mode_to);

		exchange(node, ll_to_float);
	} else {
		ir_default_lower_dw_Conv(node, mode);
	}
}
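
The unsigned path above builds a control-flow diamond: values below 2^63 convert directly, larger ones are reduced by 2^63 before the signed conversion and the high word is fixed up with 0x80000000. A C analogue (sketch; float_to_u64 is an illustrative name):

#include <stdint.h>

/* C analogue of the unsigned float -> 64bit path above (sketch). */
static uint64_t float_to_u64(long double x)
{
	const long double corr = 9223372036854775808.0L; /* 2^63, flt_corr */
	if (x < corr)                                    /* true branch: int_phi = 0 */
		return (uint64_t)(int64_t)x;
	return (uint64_t)(int64_t)(x - corr)             /* flt_phi = opc - corr */
	     + ((uint64_t)0x80000000u << 32);            /* int_phi in the high word */
}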
Example #11
/**
 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
 */
static void lower_sel(ir_node *sel)
{
	ir_graph  *irg   = get_irn_irg(sel);
	ir_entity *ent   = get_Sel_entity(sel);
	ir_type   *owner = get_entity_owner(ent);
	dbg_info  *dbg   = get_irn_dbg_info(sel);
	ir_mode   *mode  = get_irn_mode(sel);
	ir_node   *bl    = get_nodes_block(sel);
	ir_node   *newn;

	/* we can only replace Sels when the layout of the owner type is decided. */
	if (get_type_state(owner) != layout_fixed)
		return;

	if (0 < get_Sel_n_indexs(sel)) {
		/* an Array access */
		ir_type *basetyp = get_entity_type(ent);
		ir_mode *basemode;
		ir_node *index;
		if (is_Primitive_type(basetyp))
			basemode = get_type_mode(basetyp);
		else
			basemode = mode_P_data;

		assert(basemode && "no mode for lowering Sel");
		assert((get_mode_size_bits(basemode) % 8 == 0) && "cannot deal with unorthodox modes");
		index = get_Sel_index(sel, 0);

		if (is_Array_type(owner)) {
			ir_type *arr_ty = owner;
			size_t   dims   = get_array_n_dimensions(arr_ty);
			size_t  *map    = ALLOCAN(size_t, dims);
			ir_mode *mode_Int = get_reference_mode_signed_eq(mode);
			ir_tarval *tv;
			ir_node *last_size;
			size_t   i;

			assert(dims == (size_t)get_Sel_n_indexs(sel)
				&& "array dimension must match number of indices of Sel node");

			for (i = 0; i < dims; i++) {
				size_t order = get_array_order(arr_ty, i);

				assert(order < dims &&
					"order of a dimension must be smaller than the array's dimension count");
				map[order] = i;
			}
			newn = get_Sel_ptr(sel);

			/* Size of the array element */
			tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
			last_size = new_rd_Const(dbg, irg, tv);

			/*
			 * We compute the offset part of dimension d_i recursively
			 * using the offset part of dimension d_{i-1}:
			 *
			 *     off_0 = sizeof(array_element_type);
			 *     off_i = (u_i - l_i) * off_{i-1}  ; i >= 1
			 *
			 * where u_i is the upper bound and l_i the lower bound of
			 * the current dimension.
			 */
			for (i = dims; i > 0;) {
				size_t dim = map[--i];
				ir_node *lb, *ub, *elms, *n, *ind;

				elms = NULL;
				lb = get_array_lower_bound(arr_ty, dim);
				ub = get_array_upper_bound(arr_ty, dim);

				if (! is_Unknown(lb))
					lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
				else
					lb = NULL;

				if (! is_Unknown(ub))
					ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
				else
					ub = NULL;

				/*
				 * If the array has more than one dimension, lower and upper
				 * bounds have to be set in the non-last dimension.
				 */
				if (i > 0) {
					assert(lb != NULL && "lower bound has to be set in multi-dim array");
					assert(ub != NULL && "upper bound has to be set in multi-dim array");

					/* Elements in one dimension */
					elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
				}

				ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);

				/*
				 * Normalize the index if a lower bound is set;
				 * otherwise assume a lower bound of 0.
				 */
				if (lb != NULL)
					ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);

				n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);

				/*
				 * see comment above.
				 */
				if (i > 0)
					last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);

				newn = new_rd_Add(dbg, bl, newn, n, mode);
			}
		} else {
			/* no array type */
			ir_mode   *idx_mode = get_irn_mode(index);
			ir_tarval *tv       = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);

			newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
			                  new_rd_Mul(dbg, bl, index,
			                             new_r_Const(irg, tv), idx_mode),
			                  mode);
		}
	} else if (is_Method_type(get_entity_type(ent)) && is_Class_type(owner)) {
		/* We need an additional load when accessing methods from a dispatch
		 * table.
		 * Matze TODO: Is this really still used? At least liboo does its own
		 * lowering of Method-Sels...
		 */
		ir_mode   *ent_mode = get_type_mode(get_entity_type(ent));
		int        offset   = get_entity_offset(ent);
		ir_mode   *mode_Int = get_reference_mode_signed_eq(mode);
		ir_tarval *tv       = new_tarval_from_long(offset, mode_Int);
		ir_node   *cnst     = new_rd_Const(dbg, irg, tv);
		ir_node   *add      = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
		ir_node   *mem      = get_Sel_mem(sel);
		newn = new_rd_Load(dbg, bl, mem, add, ent_mode, cons_none);
		newn = new_r_Proj(newn, ent_mode, pn_Load_res);
	} else {
		int offset = get_entity_offset(ent);

		/* replace Sel by add(obj, const(ent.offset)) */
		newn = get_Sel_ptr(sel);
		if (offset != 0) {
			ir_mode   *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_tarval *tv        = new_tarval_from_long(offset, mode_UInt);
			ir_node   *cnst      = new_r_Const(irg, tv);
			newn = new_rd_Add(dbg, bl, newn, cnst, mode);
		}
	}

	/* run the hooks */
	hook_lower(sel);

	exchange(sel, newn);
}
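
The offset recursion from the comment (off_0 = sizeof(element), off_i = (u_i - l_i) * off_{i-1}) is ordinary row-major addressing. A C sketch for the case where all bounds are known; ind, lb and ub are per-dimension arrays with illustrative names:

#include <stddef.h>

/* Byte offset of a multi-dimensional array access (sketch). Dimension i
 * varies faster for larger i, matching the loop above. */
static ptrdiff_t sel_offset(size_t dims, const long *ind, const long *lb,
                            const long *ub, size_t elem_size)
{
	ptrdiff_t off       = 0;
	ptrdiff_t last_size = (ptrdiff_t)elem_size;
	for (size_t i = dims; i > 0;) {
		--i;
		off += (ind[i] - lb[i]) * last_size; /* normalized index * stride */
		if (i > 0)
			last_size *= ub[i] - lb[i];      /* elements in this dimension */
	}
	return off;
}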
Example #12
ir_node *copy_const_value(dbg_info *dbg, ir_node *n, ir_node *block)
{
	ir_graph *irg = get_irn_irg(block);

	/* @@@ GL I think we should implement this using the routines from irgopt
	 * for dead node elimination/inlining. */
	ir_mode *m = get_irn_mode(n);
	ir_node *nn;
	switch (get_irn_opcode(n)) {
	case iro_Const:
		nn = new_rd_Const(dbg, irg, get_Const_tarval(n));
		break;
	case iro_SymConst:
		nn = new_rd_SymConst(dbg, irg, get_irn_mode(n), get_SymConst_symbol(n), get_SymConst_kind(n));
		break;
	case iro_Add:
		nn = new_rd_Add(dbg, block,
		                copy_const_value(dbg, get_Add_left(n), block),
		                copy_const_value(dbg, get_Add_right(n), block), m);
		break;
	case iro_Sub:
		nn = new_rd_Sub(dbg, block,
		                copy_const_value(dbg, get_Sub_left(n), block),
		                copy_const_value(dbg, get_Sub_right(n), block), m);
		break;
	case iro_Mul:
		nn = new_rd_Mul(dbg, block,
		                copy_const_value(dbg, get_Mul_left(n), block),
		                copy_const_value(dbg, get_Mul_right(n), block), m);
		break;
	case iro_And:
		nn = new_rd_And(dbg, block,
		                copy_const_value(dbg, get_And_left(n), block),
		                copy_const_value(dbg, get_And_right(n), block), m);
		break;
	case iro_Or:
		nn = new_rd_Or(dbg, block,
		               copy_const_value(dbg, get_Or_left(n), block),
		               copy_const_value(dbg, get_Or_right(n), block), m);
		break;
	case iro_Eor:
		nn = new_rd_Eor(dbg, block,
		                copy_const_value(dbg, get_Eor_left(n), block),
		                copy_const_value(dbg, get_Eor_right(n), block), m);
		break;
	case iro_Conv:
		nn = new_rd_Conv(dbg, block,
		                 copy_const_value(dbg, get_Conv_op(n), block), m);
		break;
	case iro_Minus:
		nn = new_rd_Minus(dbg, block,
		                  copy_const_value(dbg, get_Minus_op(n), block), m);
		break;
	case iro_Not:
		nn = new_rd_Not(dbg, block,
		                copy_const_value(dbg, get_Not_op(n), block), m);
		break;
	case iro_Unknown:
		nn = new_r_Unknown(irg, m);
		break;
	default:
		panic("opcode invalid or not implemented %+F", n);
	}
	return nn;
}
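
The function clones a constant expression tree node by node into the target block. Example #11 shows a typical use, materializing an array bound next to the Sel being lowered:

lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);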