Example #1
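/**
 * Return the run-time type information (RTTI) entity for the given type.
 * Returns NULL for pointer types that do not point to a class.
 */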
ir_entity *gcji_get_rtti_entity(ir_type *type)
{
	if (is_Pointer_type(type)) {
		ir_type *points_to = get_pointer_points_to_type(type);
		if (is_Class_type(points_to))
			return oo_get_class_rtti_entity(points_to);
		return NULL;
	} else if (is_Primitive_type(type)) {
		if (type == type_boolean)
			return gcj_boolean_rtti_entity;
		if (type == type_byte)
			return gcj_byte_rtti_entity;
		if (type == type_char)
			return gcj_char_rtti_entity;
		if (type == type_short)
			return gcj_short_rtti_entity;
		if (type == type_int)
			return gcj_int_rtti_entity;
		if (type == type_long)
			return gcj_long_rtti_entity;
		if (type == type_float)
			return gcj_float_rtti_entity;
		if (type == type_double)
			return gcj_double_rtti_entity;
		panic("unexpected type");
	} else {
		return oo_get_class_rtti_entity(type);
	}
}
Example #2
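/**
 * Construct IR that allocates an array of eltype elements with the given
 * count via the gcj runtime and return a reference to the new array.
 */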
ir_node *gcji_allocate_array(ir_type *eltype, ir_node *count)
{
	ir_node *jclass = gcji_get_runtime_classinfo(eltype);
	ir_node *res;
	ir_node *new_mem;
	if (is_Primitive_type(eltype)) {
		ir_node *addr      = new_Address(gcj_new_prim_array_entity);
		ir_node *args[]    = { jclass, count };
		ir_type *call_type = get_entity_type(gcj_new_prim_array_entity);
		ir_node *mem       = get_store();
		ir_node *call      = new_Call(mem, addr, ARRAY_SIZE(args), args,
		                              call_type);
		ir_node *ress      = new_Proj(call, mode_T, pn_Call_T_result);
		new_mem = new_Proj(call, mode_M, pn_Call_M);
		res     = new_Proj(ress, mode_reference, 0);
	} else {
		ir_node *addr      = new_Address(gcj_new_object_array_entity);
		ir_node *null      = new_Const(get_mode_null(mode_reference));
		ir_node *args[]    = { count, jclass, null };
		ir_type *call_type = get_entity_type(gcj_new_object_array_entity);
		ir_node *mem       = get_store();
		ir_node *call      = new_Call(mem, addr, ARRAY_SIZE(args), args,
		                              call_type);
		ir_node *ress      = new_Proj(call, mode_T, pn_Call_T_result);
		new_mem = new_Proj(call, mode_M, pn_Call_M);
		res     = new_Proj(ress, mode_reference, 0);
	}

	ir_node *assure_vptr = new_VptrIsSet(new_mem, res, type_jarray);
	ir_node *new_mem2    = new_Proj(assure_vptr, mode_M, pn_VptrIsSet_M);
	ir_node *res2        = new_Proj(assure_vptr, mode_reference,
	                                pn_VptrIsSet_res);
	set_store(new_mem2);
	return res2;
}
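A minimal usage sketch (an illustration, not taken from the surrounding source): assuming the usual libFirm construction state with the current block and store already set up, and that type_int is the frontend's primitive int type referenced in Example #1, allocating the equivalent of "new int[10]" could look like this:

/* sketch: allocate a primitive int array of length 10 */
ir_node *count = new_Const_long(get_type_mode(type_int), 10);
ir_node *array = gcji_allocate_array(type_int, count);
/* "array" holds the reference to the new array; the memory state was
 * already advanced via set_store() inside the helper. */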
Example #3
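/**
 * Return the gcc machine-mode suffix matching the size of a primitive type.
 */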
static const char *get_gcc_machmode(ir_type *type)
{
	assert(is_Primitive_type(type));
	switch (get_type_size(type)) {
	case 4: return "si";
	case 8: return "di";
	default:
		panic("couldn't determine gcc machmode for type %+F", type);
	}
}
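Here "si" and "di" are gcc's SImode/DImode names for 4- and 8-byte integer modes. A plausible use, sketched below as an assumption rather than taken from the original source (and requiring <stdio.h>), is composing the names of libgcc arithmetic helpers:

/* assumption: build a libgcc helper name such as "__divsi3" or "__divdi3" */
char helper_name[16];
snprintf(helper_name, sizeof(helper_name), "__div%s3", get_gcc_machmode(type));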
Example #4
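/**
 * Emit the type signature of a type as a UTF-8 constant and return the
 * entity holding it.
 */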
static ir_entity *emit_type_signature(ir_type *type)
{
	ir_type *curtype = type;
	unsigned n_pointer_levels = 0;

	while (is_Pointer_type(curtype)) {
		n_pointer_levels++;
		curtype = get_pointer_points_to_type(curtype);
	}

	struct obstack obst;
	obstack_init(&obst);

	if (n_pointer_levels > 0) {
		for (unsigned i = 0; i < n_pointer_levels-1; i++)
			obstack_1grow(&obst, '[');

		if (is_Primitive_type(curtype))
			obstack_1grow(&obst, '[');
	}

	if (is_Primitive_type(curtype)) {
		char c = get_prim_type_char(curtype);
		obstack_1grow(&obst, c);
	} else {
		assert(is_Class_type(curtype));
		obstack_1grow(&obst, 'L');
		obstack_printf(&obst, "%s", get_compound_name(curtype));
		obstack_1grow(&obst, ';');
	}
	/* terminate the string so the strlen() below is well-defined for
	 * primitive signatures as well */
	obstack_1grow(&obst, '\0');
	const char *sig_bytes = obstack_finish(&obst);

	ir_entity *res = do_emit_utf8_const(sig_bytes, strlen(sig_bytes));

	obstack_free(&obst, NULL);
	return res;
}
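For concreteness, and assuming get_prim_type_char returns the usual JVM descriptor characters ('I' for int, 'Z' for boolean, and so on) while class types carry '/'-separated names, the emitted signatures are standard JVM-style descriptors, e.g.:

	int        (no pointer level)      ->  "I"
	int[]      (one pointer level)     ->  "[I"
	int[][]    (two pointer levels)    ->  "[[I"
	String     (pointer to class)      ->  "Ljava/lang/String;"
	String[]   (two pointer levels)    ->  "[Ljava/lang/String;"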
Example #5
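/**
 * Construct IR that yields the runtime class info for a type. Array types
 * have no static RTTI entity; their class objects are obtained from the
 * element class at runtime.
 */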
static ir_node *gcji_get_runtime_classinfo_(ir_node *block, ir_node **mem,
                                            ir_type *type)
{
	ir_entity *rtti_entity = gcji_get_rtti_entity(type);
	ir_graph  *irg         = get_irn_irg(block);
	if (rtti_entity != NULL)
		return new_r_Address(irg, rtti_entity);

	/* Arrays are represented as pointer types. We extract the base type,
	 * get its classinfo and let gcj give the array type for that.
	 *
	 * gcj emits the type signature to the class' constant pool. During
	 * class linking, the reference to the utf8const is replaced by the
	 * reference to the appropriate class object.
	 */

	assert(is_Pointer_type(type));

	unsigned n_pointer_levels = 0;
	ir_type *eltype           = type;
	while (is_Pointer_type(eltype)) {
		++n_pointer_levels;
		eltype = get_pointer_points_to_type(eltype);
	}

	if (!is_Primitive_type(eltype))
		--n_pointer_levels;

	ir_entity *elem_cdf = gcji_get_rtti_entity(eltype);
	assert(elem_cdf != NULL);

	ir_node *array_class_ref = new_r_Address(irg, elem_cdf);
	for (unsigned d = 0; d < n_pointer_levels; d++) {
		array_class_ref = gcji_get_arrayclass(block, mem, array_class_ref);
	}
	return array_class_ref;
}
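Worked through for the two array cases: for int[][] (two pointer levels over a primitive type) n_pointer_levels stays 2, so gcji_get_arrayclass is applied twice to the address of the int RTTI entity; for String[] (two pointer levels over a class type) the count is reduced to 1, because one pointer level merely denotes the object reference itself rather than an array dimension, so gcji_get_arrayclass is applied once to the String class info.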
Example #6
/**
 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
 */
static void lower_sel(ir_node *sel)
{
	ir_graph  *irg   = get_irn_irg(sel);
	ir_entity *ent   = get_Sel_entity(sel);
	ir_type   *owner = get_entity_owner(ent);
	dbg_info  *dbg   = get_irn_dbg_info(sel);
	ir_mode   *mode  = get_irn_mode(sel);
	ir_node   *bl    = get_nodes_block(sel);
	ir_node   *newn;

	/* we can only replace Sels when the layout of the owner type is decided. */
	if (get_type_state(owner) != layout_fixed)
		return;

	if (0 < get_Sel_n_indexs(sel)) {
		/* an Array access */
		ir_type *basetyp = get_entity_type(ent);
		ir_mode *basemode;
		ir_node *index;
		if (is_Primitive_type(basetyp))
			basemode = get_type_mode(basetyp);
		else
			basemode = mode_P_data;

		assert(basemode && "no mode for lowering Sel");
		assert((get_mode_size_bits(basemode) % 8 == 0) && "cannot deal with unorthodox modes");
		index = get_Sel_index(sel, 0);

		if (is_Array_type(owner)) {
			ir_type *arr_ty = owner;
			size_t   dims   = get_array_n_dimensions(arr_ty);
			size_t  *map    = ALLOCAN(size_t, dims);
			ir_mode *mode_Int = get_reference_mode_signed_eq(mode);
			ir_tarval *tv;
			ir_node *last_size;
			size_t   i;

			assert(dims == (size_t)get_Sel_n_indexs(sel)
				&& "array dimension must match number of indices of Sel node");

			for (i = 0; i < dims; i++) {
				size_t order = get_array_order(arr_ty, i);

				assert(order < dims &&
					"the order of a dimension must be smaller than the array's dimension count");
				map[order] = i;
			}
			newn = get_Sel_ptr(sel);

			/* Size of the array element */
			tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
			last_size = new_rd_Const(dbg, irg, tv);

			/*
			 * We compute the offset part of dimension d_i recursively
			 * with the offset part of dimension d_{i-1}
			 *
			 *     off_0 = sizeof(array_element_type);
			 *     off_i = (u_i - l_i) * off_{i-1}  ; i >= 1
			 *
			 * where u_i is the upper bound of the current dimension
			 * and l_i is the lower bound of the current dimension.
			 */
			for (i = dims; i > 0;) {
				size_t dim = map[--i];
				ir_node *lb, *ub, *elms, *n, *ind;

				elms = NULL;
				lb = get_array_lower_bound(arr_ty, dim);
				ub = get_array_upper_bound(arr_ty, dim);

				if (! is_Unknown(lb))
					lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
				else
					lb = NULL;

				if (! is_Unknown(ub))
					ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
				else
					ub = NULL;

				/*
				 * If the array has more than one dimension, lower and upper
				 * bounds have to be set in the non-last dimension.
				 */
				if (i > 0) {
					assert(lb != NULL && "lower bound has to be set in multi-dim array");
					assert(ub != NULL && "upper bound has to be set in multi-dim array");

					/* number of elements in this dimension */
					elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
				}

				ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);

				/*
				 * Normalize the index: if a lower bound is set, subtract it;
				 * otherwise assume a lower bound of 0.
				 */
				if (lb != NULL)
					ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);

				n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);

				/*
				 * see comment above.
				 */
				if (i > 0)
					last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);

				newn = new_rd_Add(dbg, bl, newn, n, mode);
			}
		} else {
			/* no array type */
			ir_mode   *idx_mode = get_irn_mode(index);
			ir_tarval *tv       = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);

			newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
			                  new_rd_Mul(dbg, bl, index,
			                             new_r_Const(irg, tv), idx_mode),
			                  mode);
		}
	} else if (is_Method_type(get_entity_type(ent)) && is_Class_type(owner)) {
		/* We need an additional load when accessing methods from a dispatch
		 * table.
		 * Matze TODO: Is this really still used? At least liboo does its own
		 * lowering of Method-Sels...
		 */
		ir_mode   *ent_mode = get_type_mode(get_entity_type(ent));
		int        offset   = get_entity_offset(ent);
		ir_mode   *mode_Int = get_reference_mode_signed_eq(mode);
		ir_tarval *tv       = new_tarval_from_long(offset, mode_Int);
		ir_node   *cnst     = new_rd_Const(dbg, irg, tv);
		ir_node   *add      = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
		ir_node   *mem      = get_Sel_mem(sel);
		newn = new_rd_Load(dbg, bl, mem, add, ent_mode, cons_none);
		newn = new_r_Proj(newn, ent_mode, pn_Load_res);
	} else {
		int offset = get_entity_offset(ent);

		/* replace Sel by add(obj, const(ent.offset)) */
		newn = get_Sel_ptr(sel);
		if (offset != 0) {
			ir_mode   *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_tarval *tv        = new_tarval_from_long(offset, mode_UInt);
			ir_node   *cnst      = new_r_Const(irg, tv);
			newn = new_rd_Add(dbg, bl, newn, cnst, mode);
		}
	}

	/* run the hooks */
	hook_lower(sel);

	exchange(sel, newn);
}
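As a concrete check of the offset recursion in the array branch (assuming identity dimension order, lower bounds of 0 and 4-byte int elements): for a fixed-layout array int a[3][4], a Sel selecting a[2][3] lowers to

	ptr + 3 * 4          (innermost dimension, off_0 = sizeof(int) = 4)
	    + 2 * (4 * 4)    (outer dimension,     off_1 = (4 - 0) * off_0 = 16)
	= ptr + 44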