Example #1
/** Patches Address nodes to work in position-independent code. */
static void fix_pic_addresses(ir_node *const node, void *const data)
{
	(void)data;

	ir_graph      *const irg = get_irn_irg(node);
	be_main_env_t *const be  = be_get_irg_main_env(irg);
	foreach_irn_in(node, i, pred) {
		if (!is_Address(pred))
			continue;

		ir_node         *res;
		ir_entity *const entity = get_Address_entity(pred);
		dbg_info  *const dbgi   = get_irn_dbg_info(pred);
		if (i == n_Call_ptr && is_Call(node)) {
			/* Calls can jump to relative addresses, so we can directly jump to
			 * the (relatively) known call address or the trampoline */
			if (can_address_relative(entity))
				continue;

			ir_entity *const trampoline = get_trampoline(be, entity);
			res = new_rd_Address(dbgi, irg, trampoline);
		} else if (get_entity_type(entity) == get_code_type()) {
			/* Block labels can always be addressed directly. */
			continue;
		} else {
			/* Everything else is accessed relative to EIP. */
			ir_node *const block    = get_nodes_block(pred);
			ir_mode *const mode     = get_irn_mode(pred);
			ir_node *const pic_base = ia32_get_pic_base(irg);

			if (can_address_relative(entity)) {
				/* Locally defined entities can be addressed relative to the
				 * PIC base. */
				res = new_rd_Add(dbgi, block, pic_base, pred, mode);
				/* Make sure the walker doesn't visit this add again. */
				mark_irn_visited(res);
			} else {
				/* Get entry from pic symbol segment. */
				ir_entity *const pic_symbol  = get_pic_symbol(be, entity);
				ir_node   *const pic_address = new_rd_Address(dbgi, irg, pic_symbol);
				ir_node   *const add         = new_rd_Add(dbgi, block, pic_base, pic_address, mode);
				mark_irn_visited(add);

				/* We need an extra indirection for global data outside our current
				 * module. The loads are always safe and can therefore float and
			 * need no memory input. */
				ir_type *const type  = get_entity_type(entity);
				ir_node *const nomem = get_irg_no_mem(irg);
				ir_node *const load  = new_rd_Load(dbgi, block, nomem, add, mode, type, cons_floats);
				res = new_r_Proj(load, mode, pn_Load_res);
			}
		}
		set_irn_n(node, i, res);
	}
}
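
fix_pic_addresses follows libFirm's graph-walker convention: a callback that receives each node plus an opaque environment pointer (unused here, hence the (void)data). A minimal sketch of how such a callback might be driven, assuming the standard irg_walk_graph walker; the driver function run_pic_fixup is illustrative, not part of the source above:

#include <libfirm/firm.h>

/* Hypothetical driver: apply the PIC fixup to every graph in the
 * program. fix_pic_addresses marks the Add nodes it creates as visited,
 * so the walker does not process them again. */
static void run_pic_fixup(void)
{
	for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i) {
		ir_graph *irg = get_irp_irg(i);
		irg_walk_graph(irg, fix_pic_addresses, NULL, NULL);
	}
}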
Example #2
static ir_node *gcji_get_arraylength(dbg_info *dbgi, ir_node *block,
                                     ir_node *arrayref, ir_node **mem)
{
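	/* Address of the length field inside the gcj array object. */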
	ir_node  *addr    = new_r_Member(block, arrayref, gcj_array_length);
	ir_node  *load    = new_rd_Load(dbgi, block, *mem, addr, mode_int,
	                                get_type_for_mode(mode_int), cons_none);
	ir_node  *new_mem = new_r_Proj(load, mode_M, pn_Load_M);
	ir_node  *res     = new_r_Proj(load, mode_int, pn_Load_res);
	*mem = new_mem;
	return res;
}
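
Note the memory-threading convention: the helper consumes *mem as the Load's memory input and writes the Load's memory Proj back through the pointer, so successive memory operations stay ordered. A hedged sketch of a caller under that convention; the function and variable names (sum_of_lengths, array1, array2) are illustrative:

/* Hypothetical caller: read two array lengths in sequence. Each call
 * consumes the current memory state and returns the updated one, so the
 * second Load is ordered after the first. */
static ir_node *sum_of_lengths(ir_graph *irg, dbg_info *dbgi,
                               ir_node *block, ir_node *array1,
                               ir_node *array2)
{
	ir_node *mem  = get_irg_initial_mem(irg);
	ir_node *len1 = gcji_get_arraylength(dbgi, block, array1, &mem);
	ir_node *len2 = gcji_get_arraylength(dbgi, block, array2, &mem);
	return new_r_Add(block, len1, len2);
}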
Example #3
static ir_node *create_gotpcrel_load(ir_graph *const irg, ir_entity *const entity)
{
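	/* PC-relative address of the entity's GOT slot. */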
	ir_node *const addr
		= be_new_Relocation(irg, X86_IMM_GOTPCREL, entity, mode_P);
	ir_type *const type  = get_entity_type(entity);
	ir_node *const nomem = get_irg_no_mem(irg);
	ir_node *const block = get_irg_start_block(irg);
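	/* The GOT entry is constant at run time, so the Load takes no memory
	 * dependency (nomem) and may float freely (cons_floats). */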
	ir_node *const load  = new_rd_Load(NULL, block, nomem, addr, mode_P,
	                                   type, cons_floats);
	return new_r_Proj(load, mode_P, pn_Load_res);
}
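
The nomem/cons_floats pair is what lets this Load be hoisted and CSE'd like a constant. A sketch of where such a helper might sit, assuming a predicate such as entity_has_definition to decide whether the entity is defined in the current module (the exact predicate is an assumption here):

/* Hypothetical wrapper: address a global directly when it is defined in
 * this module, otherwise go through the GOT. entity_has_definition is
 * assumed; substitute the real visibility check. */
static ir_node *address_of_global(ir_graph *irg, ir_entity *entity)
{
	if (entity_has_definition(entity))
		return new_r_Address(irg, entity);
	return create_gotpcrel_load(irg, entity);
}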
Example #4
/**
 * Turn a small CopyB node into a series of Load/Store nodes.
 */
static void lower_small_copyb_node(ir_node *irn)
{
	ir_graph      *irg         = get_irn_irg(irn);
	dbg_info      *dbgi        = get_irn_dbg_info(irn);
	ir_node       *block       = get_nodes_block(irn);
	ir_type       *tp          = get_CopyB_type(irn);
	ir_node       *addr_src    = get_CopyB_src(irn);
	ir_node       *addr_dst    = get_CopyB_dst(irn);
	ir_node       *mem         = get_CopyB_mem(irn);
	ir_mode       *mode_ref    = get_irn_mode(addr_src);
	unsigned       mode_bytes  = allow_misalignments ? native_mode_bytes
	                                                 : get_type_alignment(tp);
	unsigned       size        = get_type_size(tp);
	unsigned       offset      = 0;
	bool           is_volatile = get_CopyB_volatility(irn) == volatility_is_volatile;
	ir_cons_flags  flags       = is_volatile ? cons_volatile : cons_none;

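	/* Copy with the widest access first, then halve the access width to
	 * mop up the remainder; the alignment bound guarantees termination. */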
	while (offset < size) {
		ir_mode *mode = get_ir_mode(mode_bytes);
		for (; offset + mode_bytes <= size; offset += mode_bytes) {
			ir_mode *mode_ref_int = get_reference_offset_mode(mode_ref);

			/* construct offset */
			ir_node *addr_const = new_r_Const_long(irg, mode_ref_int, offset);
			ir_node *add        = new_r_Add(block, addr_src, addr_const);

			ir_node *load     = new_rd_Load(dbgi, block, mem, add, mode, tp, flags);
			ir_node *load_res = new_r_Proj(load, mode, pn_Load_res);
			ir_node *load_mem = new_r_Proj(load, mode_M, pn_Load_M);

			ir_node *addr_const2 = new_r_Const_long(irg, mode_ref_int, offset);
			ir_node *add2        = new_r_Add(block, addr_dst, addr_const2);

			ir_node *store     = new_rd_Store(dbgi, block, load_mem, add2, load_res, tp, flags);
			ir_node *store_mem = new_r_Proj(store, mode_M, pn_Store_M);

			mem = store_mem;
		}

		mode_bytes /= 2;
	}

	exchange(irn, mem);
}
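
The nested loops produce a simple width-halving schedule. A standalone sketch of just that schedule, in plain C without libFirm types, to make the emitted Load/Store pairs visible; print_copy_schedule is an illustrative name:

#include <stdio.h>

/* Print the chunk schedule the lowering above would emit, e.g.
 * print_copy_schedule(7, 4) gives 4 bytes @0, 2 bytes @4, 1 byte @6.
 * Terminates because mode_bytes halves down to 1, which always fits. */
static void print_copy_schedule(unsigned size, unsigned mode_bytes)
{
	unsigned offset = 0;
	while (offset < size) {
		for (; offset + mode_bytes <= size; offset += mode_bytes)
			printf("copy %u bytes at offset %u\n", mode_bytes, offset);
		mode_bytes /= 2;
	}
}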
Example #5
void be_default_lower_va_arg(ir_node *const node, bool const compound_is_ptr,
                             unsigned const stack_param_align)
{
	ir_node  *block = get_nodes_block(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_graph *irg   = get_irn_irg(node);

	ir_type       *aptype   = get_method_res_type(get_Builtin_type(node), 0);
	ir_node *const ap       = get_irn_n(node, 1);
	ir_node *const node_mem = get_Builtin_mem(node);

	ir_mode *apmode = get_type_mode(aptype);
	ir_node *res;
	ir_node *new_mem;
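	/* Types with a mode can be loaded directly from the va_list pointer;
	 * compounds passed by reference load the pointer instead. */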
	if (apmode) {
		goto load;
	} else if (compound_is_ptr) {
		apmode = mode_P;
		aptype = get_type_for_mode(apmode);
load:;
		ir_node *const load = new_rd_Load(dbgi, block, node_mem, ap, apmode, aptype, cons_none);
		res     = new_r_Proj(load, apmode, pn_Load_res);
		new_mem = new_r_Proj(load, mode_M, pn_Load_M);
	} else {
		/* aptype has no associated mode, so it is represented as a pointer. */
		res     = ap;
		new_mem = node_mem;
	}

	unsigned const round_up    = round_up2(get_type_size(aptype),
	                                       stack_param_align);
	ir_mode *const offset_mode = get_reference_offset_mode(mode_P);
	ir_node *const offset      = new_r_Const_long(irg, offset_mode, round_up);
	ir_node *const new_ap      = new_rd_Add(dbgi, block, ap, offset);

	ir_node *const in[] = { new_mem, res, new_ap };
	turn_into_tuple(node, ARRAY_SIZE(in), in);
}
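
turn_into_tuple replaces the Builtin by a Tuple whose entries follow the in[] array above: index 0 is the new memory state, 1 the loaded value, 2 the advanced va_list pointer, so the pre-existing Projs on the node now select these results. A hedged sketch of reading them back out; read_lowered_va_arg is an illustrative name:

/* Hypothetical accessor: indices mirror the in[] array above
 * (0 = memory, 1 = value, 2 = advanced pointer). */
static void read_lowered_va_arg(ir_node *tuple, ir_mode *apmode,
                                ir_node **value, ir_node **next_ap)
{
	*value   = new_r_Proj(tuple, apmode, 1);
	*next_ap = new_r_Proj(tuple, mode_P, 2);
}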
Example #6
/**
 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
 */
static void lower_sel(ir_node *sel)
{
	ir_graph  *irg   = get_irn_irg(sel);
	ir_entity *ent   = get_Sel_entity(sel);
	ir_type   *owner = get_entity_owner(ent);
	dbg_info  *dbg   = get_irn_dbg_info(sel);
	ir_mode   *mode  = get_irn_mode(sel);
	ir_node   *bl    = get_nodes_block(sel);
	ir_node   *newn;

	/* we can only replace Sels when the layout of the owner type is decided. */
	if (get_type_state(owner) != layout_fixed)
		return;

	if (0 < get_Sel_n_indexs(sel)) {
		/* an Array access */
		ir_type *basetyp = get_entity_type(ent);
		ir_mode *basemode;
		ir_node *index;
		if (is_Primitive_type(basetyp))
			basemode = get_type_mode(basetyp);
		else
			basemode = mode_P_data;

		assert(basemode && "no mode for lowering Sel");
		assert((get_mode_size_bits(basemode) % 8 == 0) && "cannot deal with unorthodox modes");
		index = get_Sel_index(sel, 0);

		if (is_Array_type(owner)) {
			ir_type *arr_ty = owner;
			size_t   dims   = get_array_n_dimensions(arr_ty);
			size_t  *map    = ALLOCAN(size_t, dims);
			ir_mode *mode_Int = get_reference_mode_signed_eq(mode);
			ir_tarval *tv;
			ir_node *last_size;
			size_t   i;

			assert(dims == (size_t)get_Sel_n_indexs(sel)
				&& "array dimension must match number of indices of Sel node");

			for (i = 0; i < dims; i++) {
				size_t order = get_array_order(arr_ty, i);

				assert(order < dims &&
					"order of a dimension must be smaller than the arrays dim");
				map[order] = i;
			}
			newn = get_Sel_ptr(sel);

			/* Size of the array element */
			tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
			last_size = new_rd_Const(dbg, irg, tv);

			/*
			 * We compute the offset part of dimension d_i recursively
			 * from the offset part of dimension d_{i-1}:
			 *
			 *     off_0 = sizeof(array_element_type);
			 *     off_i = (u_i - l_i) * off_{i-1}  ; i >= 1
			 *
			 * where u_i is the upper bound and l_i the lower bound of
			 * the current dimension.
			 */
			for (i = dims; i > 0;) {
				size_t dim = map[--i];
				ir_node *lb, *ub, *elms, *n, *ind;

				elms = NULL;
				lb = get_array_lower_bound(arr_ty, dim);
				ub = get_array_upper_bound(arr_ty, dim);

				if (! is_Unknown(lb))
					lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
				else
					lb = NULL;

				if (! is_Unknown(ub))
					ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
				else
					ub = NULL;

				/*
				 * If the array has more than one dimension, lower and upper
				 * bounds have to be set in all but the last dimension.
				 */
				if (i > 0) {
					assert(lb != NULL && "lower bound has to be set in multi-dim array");
					assert(ub != NULL && "upper bound has to be set in multi-dim array");

					/* Elements in one Dimension */
					elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
				}

				ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);

				/*
				 * Normalize the index: if a lower bound is set, subtract
				 * it; otherwise assume a lower bound of 0.
				 */
				if (lb != NULL)
					ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);

				n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);

				/* see comment above */
				if (i > 0)
					last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);

				newn = new_rd_Add(dbg, bl, newn, n, mode);
			}
		} else {
			/* no array type */
			ir_mode   *idx_mode = get_irn_mode(index);
			ir_tarval *tv       = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);

			newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
				new_rd_Mul(dbg, bl, index,
				new_r_Const(irg, tv),
				idx_mode),
				mode);
		}
	} else if (is_Method_type(get_entity_type(ent)) && is_Class_type(owner)) {
		/* We need an additional load when accessing methods from a dispatch
		 * table.
		 * Matze TODO: Is this really still used? At least liboo does its own
		 * lowering of Method-Sels...
		 */
		ir_mode   *ent_mode = get_type_mode(get_entity_type(ent));
		int        offset   = get_entity_offset(ent);
		ir_mode   *mode_Int = get_reference_mode_signed_eq(mode);
		ir_tarval *tv       = new_tarval_from_long(offset, mode_Int);
		ir_node   *cnst     = new_rd_Const(dbg, irg, tv);
		ir_node   *add      = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
		ir_node   *mem      = get_Sel_mem(sel);
		newn = new_rd_Load(dbg, bl, mem, add, ent_mode, cons_none);
		newn = new_r_Proj(newn, ent_mode, pn_Load_res);
	} else {
		int offset = get_entity_offset(ent);

		/* replace Sel by add(obj, const(ent.offset)) */
		newn = get_Sel_ptr(sel);
		if (offset != 0) {
			ir_mode   *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_tarval *tv        = new_tarval_from_long(offset, mode_UInt);
			ir_node   *cnst      = new_r_Const(irg, tv);
			newn = new_rd_Add(dbg, bl, newn, cnst, mode);
		}
	}

	/* run the hooks */
	hook_lower(sel);

	exchange(sel, newn);
}
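
The offset recurrence above is easiest to see on a concrete array. For int a[4][5] in row-major layout with zero lower bounds: off_0 = sizeof(int) = 4, off_1 = (5 - 0) * off_0 = 20, and the byte offset of a[i][j] is i*off_1 + j*off_0. A plain-integer sketch of that computation; the lowering builds the same products as Mul/Add nodes, and array_byte_offset is an illustrative name:

/* Byte offset of a[i][j] for int a[4][5] with zero lower bounds. */
static unsigned array_byte_offset(unsigned i, unsigned j)
{
	unsigned const off0 = (unsigned)sizeof(int); /* off_0: element size */
	unsigned const off1 = 5 * off0;              /* off_1 = 5 * off_0   */
	return i * off1 + j * off0;
}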