Example #1
void dmemory_lower_Alloc(ir_node *node)
{
	assert(is_Alloc(node));

	if (get_Alloc_where(node) != heap_alloc)
		return;

	ir_graph *irg     = get_irn_irg(node);
	ir_type  *type    = get_Alloc_type(node);
	ir_node  *count   = get_Alloc_count(node);
	ir_node  *res     = NULL;
	ir_node  *cur_mem = get_Alloc_mem(node);
	ir_node  *block   = get_nodes_block(node);

	if (is_Class_type(type)) {
		res = (*dmemory_model.alloc_object)(type, irg, block, &cur_mem);
		ddispatch_prepare_new_instance(type, res, irg, block, &cur_mem);
	} else if (is_Array_type(type)) {
		ir_type *eltype  = get_array_element_type(type);
		res = (*dmemory_model.alloc_array)(eltype, count, irg, block, &cur_mem);
	} else {
		assert (0);
	}

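	/* replace the Alloc: turn it into a Tuple whose M and res inputs carry the
	 * new memory state and the allocated pointer, so existing Projs pick up
	 * the new values */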
	turn_into_tuple(node, pn_Alloc_max);
	set_irn_n(node, pn_Alloc_M, cur_mem);
	set_irn_n(node, pn_Alloc_res, res);
}
Example #2
static ir_type *get_alive_subclass(ir_type *klass)
{
	assert (is_Class_type(klass));
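	/* using the RTA results, look for exactly one live class in klass'
	 * transitive subtype hierarchy (including klass itself); return it,
	 * or NULL if there is none or more than one */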
	if (oo_get_class_is_extern(klass))
		return NULL; // we cannot be sure that we know all existing subclasses

	// FIXME: should check the existence of native methods

	ir_type *alive_class = NULL;
	size_t   alive_count = 0;
	if (rta_is_alive_class(klass)) {
		alive_class = klass;
		alive_count++;
	}

	if (! oo_get_class_is_final(klass)) {
		ir_type *iter = get_class_trans_subtype_first(klass);
		while (iter) {
			if (rta_is_alive_class(iter)) {
				alive_class = iter;
				alive_count++;
			}
			iter = get_class_trans_subtype_next(klass);
		}
	}

	if (alive_count == 1)
		return alive_class;
	else
		return NULL;
}
Example #3
ir_entity *gcji_get_rtti_entity(ir_type *type)
{
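	/* map a Firm type to its RTTI entity: pointers to class types and plain
	 * class types use the liboo RTTI entity, primitive types use the fixed
	 * gcj RTTI entities */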
	if (is_Pointer_type(type)) {
		ir_type *points_to = get_pointer_points_to_type(type);
		if (is_Class_type(points_to))
			return oo_get_class_rtti_entity(points_to);
		return NULL;
	} else if (is_Primitive_type(type)) {
		if (type == type_boolean)
			return gcj_boolean_rtti_entity;
		if (type == type_byte)
			return gcj_byte_rtti_entity;
		if (type == type_char)
			return gcj_char_rtti_entity;
		if (type == type_short)
			return gcj_short_rtti_entity;
		if (type == type_int)
			return gcj_int_rtti_entity;
		if (type == type_long)
			return gcj_long_rtti_entity;
		if (type == type_float)
			return gcj_float_rtti_entity;
		if (type == type_double)
			return gcj_double_rtti_entity;
		panic("unexpected type");
	} else {
		return oo_get_class_rtti_entity(type);
	}
}
Example #4
void gcji_class_init(ir_type *type)
{
	assert(is_Class_type(type));

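	/* build a call to the runtime class-initialization routine
	 * (gcj_init_entity), passing the class' runtime classinfo, and thread
	 * the memory state through the Call */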
	ir_node *addr      = new_Address(gcj_init_entity);
	ir_node *jclass    = gcji_get_runtime_classinfo(type);
	ir_node *args[]    = { jclass };
	ir_type *call_type = get_entity_type(gcj_init_entity);
	ir_node *mem       = get_store();
	ir_node *call      = new_Call(mem, addr, ARRAY_SIZE(args), args, call_type);
	ir_node *new_mem   = new_Proj(call, mode_M, pn_Call_M);
	set_store(new_mem);
}
Example #5
ir_node *dmemory_default_alloc_array(ir_type *eltype, ir_node *count, ir_graph *irg, ir_node *block, ir_node **mem)
{
	ir_node *cur_mem      = *mem;

	unsigned count_size   = get_mode_size_bytes(default_arraylength_mode);
	unsigned element_size = is_Class_type(eltype) ? get_mode_size_bytes(mode_P) : get_type_size_bytes(eltype); // FIXME: some langs support arrays of structs.
	/* increase element count so we have enough space for a counter
	 * at the front */
	unsigned add_size     = (element_size + (count_size-1)) / count_size;
	ir_node *count_u      = new_r_Conv(block, count, mode_Iu);
	ir_node *addv         = new_r_Const_long(irg, mode_Iu, add_size);
	ir_node *add1         = new_r_Add(block, count_u, addv, mode_Iu);
	ir_node *elsizev      = new_r_Const_long(irg, mode_Iu, element_size);

	ir_node *size         = new_r_Mul(block, add1, elsizev, mode_Iu);
	unsigned addr_delta   = add_size * element_size;

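	/* build the callee address and call calloc(1, size); calloc also
	 * zero-initializes the allocated memory */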
	symconst_symbol calloc_sym;
	calloc_sym.entity_p   = calloc_entity;
	ir_node *callee       = new_r_SymConst(irg, mode_P, calloc_sym, symconst_addr_ent);

	ir_node *one          = new_r_Const_long(irg, mode_Iu, 1);
	ir_node *in[2]        = { one, size };
	ir_type *call_type    = get_entity_type(calloc_entity);
	ir_node *call         = new_r_Call(block, cur_mem, callee, 2, in, call_type);
	cur_mem               = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node *ress         = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node *res          = new_r_Proj(ress, mode_P, 0);

	/* write length of array */
	ir_node *len_value    = new_r_Conv(block, count, default_arraylength_mode);

	ir_node *len_delta    = new_r_Const_long(irg, mode_P, (int)addr_delta-4); //FIXME: replace magic num
	ir_node *len_addr     = new_r_Add(block, res, len_delta, mode_P);
	ir_node *store        = new_r_Store(block, cur_mem, len_addr, len_value, cons_none);
	cur_mem               = new_r_Proj(store, mode_M, pn_Store_M);

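	/* advance the result pointer past the space reserved for the length
	 * counter so it points at the first array element */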
	if (addr_delta > 0) {
		ir_node *delta = new_r_Const_long(irg, mode_P, (int)addr_delta);
		res = new_r_Add(block, res, delta, mode_P);
	}

	*mem = cur_mem;
	return res;
}
Example #6
ir_node *gcji_allocate_object(ir_type *type)
{
	assert(is_Class_type(type));

	ir_node *addr      = new_Address(gcj_alloc_entity);
	ir_node *jclass    = gcji_get_runtime_classinfo(type);
	ir_node *args[]    = { jclass };
	ir_type *call_type = get_entity_type(gcj_alloc_entity);
	ir_node *mem       = get_store();
	ir_node *call      = new_Call(mem, addr, ARRAY_SIZE(args), args, call_type);
	ir_node *new_mem   = new_Proj(call, mode_M, pn_Call_M);
	ir_node *ress      = new_Proj(call, mode_T, pn_Call_T_result);
	ir_node *res       = new_Proj(ress, mode_reference, 0);

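	/* VptrIsSet marks res as an object whose vptr is set for dynamic type
	 * `type`, so later type-based optimizations can rely on it */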
	ir_node *assure_vptr = new_VptrIsSet(new_mem, res, type);
	ir_node *new_mem2    = new_Proj(assure_vptr, mode_M, pn_VptrIsSet_M);
	ir_node *res2        = new_Proj(assure_vptr, mode_reference,
	                                pn_VptrIsSet_res);
	set_store(new_mem2);
	return res2;
}
Example #7
static bool check_compound_type(const ir_type *tp)
{
	bool fine     = true;
	bool is_class = is_Class_type(tp);
	for (size_t i = 0, n = get_compound_n_members(tp); i < n; ++i) {
		ir_entity *member = get_compound_member(tp, i);
		if (member == NULL) {
			report_error("%+F has a NULL member\n", tp);
			fine = false;
			continue;
		}
		ir_type *owner = get_entity_owner(member);
		if (owner != tp) {
			report_error("member %+F of %+F has owner %+F\n", member, tp, owner);
			fine = false;
		}
		if (is_class) {
			fine &= check_class_member(tp, member);
		}
	}
	return fine;
}
Example #8
static ir_entity *emit_type_signature(ir_type *type)
{
	ir_type *curtype = type;
	unsigned n_pointer_levels = 0;

	while (is_Pointer_type(curtype)) {
		n_pointer_levels++;
		curtype = get_pointer_points_to_type(curtype);
	}

	struct obstack obst;
	obstack_init(&obst);

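	/* emit one '[' per pointer level: a pointer to a class is the object
	 * reference itself, so class types get n_pointer_levels-1 brackets,
	 * while primitives get all of them */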
	if (n_pointer_levels > 0) {
		for (unsigned i = 0; i < n_pointer_levels-1; i++)
			obstack_1grow(&obst, '[');

		if (is_Primitive_type(curtype))
			obstack_1grow(&obst, '[');
	}

	if (is_Primitive_type(curtype)) {
		char c = get_prim_type_char(curtype);
		obstack_1grow(&obst, c);
	} else {
		assert(is_Class_type(curtype));
		obstack_1grow(&obst, 'L');
		obstack_printf(&obst, "%s", get_compound_name(curtype));
		obstack_1grow(&obst, ';');
	}
	/* NUL-terminate in both branches: obstack_finish() does not add a '\0'
	 * and the strlen() below relies on it */
	obstack_1grow(&obst, '\0');
	const char *sig_bytes = obstack_finish(&obst);

	ir_entity *res = do_emit_utf8_const(sig_bytes, strlen(sig_bytes));

	obstack_free(&obst, NULL);
	return res;
}
Example #9
static ir_type *get_Sel_or_SymConst_type(ir_node *sos)
{
	assert (is_Sel(sos) || is_SymConst_addr_ent(sos));

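	/* determine the class type a Sel/SymConst address refers to: for method
	 * entities use the (single) result type, then strip one pointer level */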
	ir_entity *entity = get_irn_entity_attr(sos);
	ir_type   *type   = get_entity_type(entity);

	if (is_Method_type(type)) {
		size_t n_ress = get_method_n_ress(type);
		if (n_ress == 0)
			return NULL;
		assert (n_ress == 1);
		type = get_method_res_type(type, 0);
	}

	if (is_Pointer_type(type))
		type = get_pointer_points_to_type(type);

	if (is_Class_type(type))
		return type;

	return NULL;
}
Example #10
static void infer_typeinfo_walker(ir_node *irn, void *env)
{
	bool *changed = (bool*) env;

	// A node's type needs only to be calculated once.
	if (get_irn_typeinfo_type(irn) != initial_type)
		return;

	if (is_Alloc(irn)) {
		// this one is easy, we know the exact dynamic type.
		ir_type *type = get_Alloc_type(irn);
		if (! is_Class_type(type))
			return;

		set_irn_typeinfo_type(irn, type);
		*changed = true;
	}
	else if (is_Sel(irn) || is_SymConst_addr_ent(irn)) {
		// the type we determine here is that of the entity we select or reference.
		// the transform_Sel method below will use the type incoming on the Sel_ptr input.
		ir_type *type = get_Sel_or_SymConst_type(irn);
		if (! type)
			return;

		ir_type *one_alive = get_alive_subclass(type);
		if (! one_alive)
			return;

		set_irn_typeinfo_type(irn, one_alive);
		*changed = true;
	}
	else if (is_Call(irn)) {
		// the dynamic type of the call result is the return type of the called entity.
		ir_node *call_pred = get_Call_ptr(irn);
		ir_type *pred_type = get_irn_typeinfo_type(call_pred);
		if (pred_type == initial_type)
			return;

		set_irn_typeinfo_type(irn, pred_type);
		*changed = true;
	}
	else if (is_Load(irn)) {
		// the dynamic type of the Load result is the type of the loaded entity.
		ir_node *load_pred = get_Load_ptr(irn);

		if (! is_Sel(load_pred) && !is_SymConst_addr_ent(load_pred))
			return;

		ir_type *pred_type = get_irn_typeinfo_type(load_pred);
		if (pred_type == initial_type)
			return;
		set_irn_typeinfo_type(irn, pred_type);
		*changed = true;
	}
	else if (is_Proj(irn)) {
		// Types have to be propagated through Proj nodes (XXX: and also through Cast and Confirm)
		ir_mode *pmode = get_irn_mode(irn);
		if (pmode != mode_P)
			return;

		ir_node *proj_pred = get_Proj_pred(irn);
		if (is_Proj(proj_pred) && get_irn_mode(proj_pred) == mode_T && get_Proj_proj(proj_pred) == pn_Call_T_result && is_Call(get_Proj_pred(proj_pred)))
			proj_pred = get_Proj_pred(proj_pred); // skip the result tuple

		ir_type *pred_type = get_irn_typeinfo_type(proj_pred);
		if (pred_type == initial_type)
			return;

		set_irn_typeinfo_type(irn, pred_type);
		*changed = true;
	}
	else if (is_Phi(irn)) {
		// Phi nodes are a special case because the incoming type information must be merged.
		// A Phi node's type is unknown until all inputs are known to be the same dynamic type.
		ir_mode *pmode = get_irn_mode(irn);
		if (pmode != mode_P)
			return;

		int phi_preds = get_Phi_n_preds(irn);
		ir_type *last = NULL;
		for (int p = 0; p < phi_preds; p++) {
			ir_node *pred = get_Phi_pred(irn, p);
			ir_type *pred_type = get_irn_typeinfo_type(pred);
			if (pred_type == initial_type)
				return;
			if (p && last != pred_type)
				return;

			last = pred_type;
		}
		set_irn_typeinfo_type(irn, last);
		*changed = true;
	}
}
Example #11
/**
 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
 */
static void lower_sel(ir_node *sel)
{
	ir_graph  *irg   = get_irn_irg(sel);
	ir_entity *ent   = get_Sel_entity(sel);
	ir_type   *owner = get_entity_owner(ent);
	dbg_info  *dbg   = get_irn_dbg_info(sel);
	ir_mode   *mode  = get_irn_mode(sel);
	ir_node   *bl    = get_nodes_block(sel);
	ir_node   *newn;

	/* we can only replace Sels when the layout of the owner type is decided. */
	if (get_type_state(owner) != layout_fixed)
		return;

	if (0 < get_Sel_n_indexs(sel)) {
		/* an Array access */
		ir_type *basetyp = get_entity_type(ent);
		ir_mode *basemode;
		ir_node *index;
		if (is_Primitive_type(basetyp))
			basemode = get_type_mode(basetyp);
		else
			basemode = mode_P_data;

		assert(basemode && "no mode for lowering Sel");
		assert((get_mode_size_bits(basemode) % 8 == 0) && "cannot deal with unorthodox modes");
		index = get_Sel_index(sel, 0);

		if (is_Array_type(owner)) {
			ir_type *arr_ty = owner;
			size_t   dims   = get_array_n_dimensions(arr_ty);
			size_t  *map    = ALLOCAN(size_t, dims);
			ir_mode *mode_Int = get_reference_mode_signed_eq(mode);
			ir_tarval *tv;
			ir_node *last_size;
			size_t   i;

			assert(dims == (size_t)get_Sel_n_indexs(sel)
				&& "array dimension must match number of indices of Sel node");

			for (i = 0; i < dims; i++) {
				size_t order = get_array_order(arr_ty, i);

				assert(order < dims &&
					"the order of a dimension must be smaller than the array's number of dimensions");
				map[order] = i;
			}
			newn = get_Sel_ptr(sel);

			/* Size of the array element */
			tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
			last_size = new_rd_Const(dbg, irg, tv);

			/*
			 * We compute the offset part of dimension d_i recursively
			 * with the offset part of dimension d_{i-1}
			 *
			 *     off_0 = sizeof(array_element_type);
			 *     off_i = (u_i - l_i) * off_{i-1}  ; i >= 1
			 *
			 * where u_i is the upper bound of the current dimension
			 * and l_i the lower bound of the current dimension.
			 */
			for (i = dims; i > 0;) {
				size_t dim = map[--i];
				ir_node *lb, *ub, *elms, *n, *ind;

				elms = NULL;
				lb = get_array_lower_bound(arr_ty, dim);
				ub = get_array_upper_bound(arr_ty, dim);

				if (! is_Unknown(lb))
					lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
				else
					lb = NULL;

				if (! is_Unknown(ub))
					ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
				else
					ub = NULL;

				/*
				 * If the array has more than one dimension, lower and upper
				 * bounds have to be set in the non-last dimension.
				 */
				if (i > 0) {
					assert(lb != NULL && "lower bound has to be set in multi-dim array");
					assert(ub != NULL && "upper bound has to be set in multi-dim array");

					/* Elements in one Dimension */
					elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
				}

				ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);

				/*
				 * Normalize the index: if a lower bound is set, subtract it;
				 * otherwise assume a lower bound of 0.
				 */
				if (lb != NULL)
					ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);

				n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);

				/*
				 * see comment above.
				 */
				if (i > 0)
					last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);

				newn = new_rd_Add(dbg, bl, newn, n, mode);
			}
		} else {
			/* no array type */
			ir_mode   *idx_mode = get_irn_mode(index);
			ir_tarval *tv       = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);

			newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
				new_rd_Mul(dbg, bl, index,
				new_r_Const(irg, tv),
				idx_mode),
				mode);
		}
	} else if (is_Method_type(get_entity_type(ent)) && is_Class_type(owner)) {
		/* We need an additional load when accessing methods from a dispatch
		 * table.
		 * Matze TODO: Is this really still used? At least liboo does its own
		 * lowering of Method-Sels...
		 */
		ir_mode   *ent_mode = get_type_mode(get_entity_type(ent));
		int        offset   = get_entity_offset(ent);
		ir_mode   *mode_Int = get_reference_mode_signed_eq(mode);
		ir_tarval *tv       = new_tarval_from_long(offset, mode_Int);
		ir_node   *cnst     = new_rd_Const(dbg, irg, tv);
		ir_node   *add      = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
		ir_node   *mem      = get_Sel_mem(sel);
		newn = new_rd_Load(dbg, bl, mem, add, ent_mode, cons_none);
		newn = new_r_Proj(newn, ent_mode, pn_Load_res);
	} else {
		int offset = get_entity_offset(ent);

		/* replace Sel by add(obj, const(ent.offset)) */
		newn = get_Sel_ptr(sel);
		if (offset != 0) {
			ir_mode   *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_tarval *tv        = new_tarval_from_long(offset, mode_UInt);
			ir_node   *cnst      = new_r_Const(irg, tv);
			newn = new_rd_Add(dbg, bl, newn, cnst, mode);
		}
	}

	/* run the hooks */
	hook_lower(sel);

	exchange(sel, newn);
}