Example #1
static void replace_with_call(ir_node *node)
{
	widen_builtin(node);

	ir_type        *const mtp      = get_Builtin_type(node);
	ir_builtin_kind const kind     = get_Builtin_kind(node);
	char     const *const name     = get_builtin_name(kind);
	ir_type        *const arg1     = get_method_param_type(mtp, 0);
	char     const *const machmode = get_gcc_machmode(arg1);
	ident          *const id       = new_id_fmt("__%s%s2", name, machmode);
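	/* e.g. a popcount Builtin on a 32-bit operand would end up calling the
	 * libgcc-style helper "__popcountsi2" ("popcount" + machmode "si"). */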
	ir_entity      *const entity
		= create_compilerlib_entity(get_id_str(id), mtp);

	dbg_info *const dbgi      = get_irn_dbg_info(node);
	ir_node  *const block     = get_nodes_block(node);
	ir_node  *const mem       = get_Builtin_mem(node);
	ir_graph *const irg       = get_irn_irg(node);
	ir_node  *const callee    = new_r_Address(irg, entity);
	int       const n_params  = get_Builtin_n_params(node);
	ir_node **const params    = get_Builtin_param_arr(node);
	ir_node  *const call      = new_rd_Call(dbgi, block, mem, callee, n_params, params, mtp);
	ir_node  *const call_mem  = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node  *const call_ress = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_type  *const res_type  = get_method_res_type(mtp, 0);
	ir_mode  *const res_mode  = get_type_mode(res_type);
	ir_node  *const call_res  = new_r_Proj(call_ress, res_mode, 0);

	ir_node *const in[] = {
		[pn_Builtin_M]       = call_mem,
		[pn_Builtin_max + 1] = call_res,
	};
	/* presumably the Builtin is then replaced by a tuple of these results */
	turn_into_tuple(node, ARRAY_SIZE(in), in);
}
Example #2
static ir_node *create_fpu_mode_reload(void *const env, ir_node *const state, ir_node *const spill, ir_node *const before, ir_node *const last_state)
{
	(void)env;
	(void)state;

	ir_node        *reload;
	ir_node  *const block = get_nodes_block(before);
	ir_graph *const irg   = get_irn_irg(block);
	ir_node  *const noreg = ia32_new_NoReg_gp(irg);
	ir_node  *const nomem = get_irg_no_mem(irg);
	if (ia32_cg_config.use_unsafe_floatconv) {
		reload = new_bd_ia32_FldCW(NULL, block, noreg, noreg, nomem);
		ir_entity *const rounding_mode = spill ?
			create_ent(&fpcw_round,    0xC7F, "_fpcw_round") :
			create_ent(&fpcw_truncate, 0x37F, "_fpcw_truncate");
		set_ia32_am_ent(reload, rounding_mode);
	} else {
		ir_node       *mem;
		ir_node *const frame = get_irg_frame(irg);
		if (spill) {
			mem = spill;
		} else {
			assert(last_state);
			ir_node *const cwstore = create_fnstcw(block, frame, noreg, nomem, last_state);
			sched_add_before(before, cwstore);

			ir_node *const load = new_bd_ia32_Load(NULL, block, frame, noreg, cwstore);
			set_ia32_op_type(load, ia32_AddrModeS);
			set_ia32_ls_mode(load, mode_Hu);
			set_ia32_frame_use(load, IA32_FRAME_USE_32BIT);
			sched_add_before(before, load);

			ir_node *const load_res = new_r_Proj(load, ia32_mode_gp, pn_ia32_Load_res);

			/* TODO: Make the actual mode configurable in ChangeCW. */
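			/* OR-ing in 0xC00 sets the x87 rounding-control field (bits
			 * 11..10) to 11b, i.e. "round toward zero" (truncation). */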
			ir_node *const or_const = ia32_create_Immediate(irg, 0xC00);
			ir_node *const orn      = new_bd_ia32_Or(NULL, block, noreg, noreg, nomem, load_res, or_const);
			sched_add_before(before, orn);

			ir_node *const store = new_bd_ia32_Store(NULL, block, frame, noreg, nomem, orn);
			set_ia32_op_type(store, ia32_AddrModeD);
			/* Use ia32_mode_gp, as movl has a shorter opcode than movw. */
			set_ia32_ls_mode(store, ia32_mode_gp);
			set_ia32_frame_use(store, IA32_FRAME_USE_32BIT);
			sched_add_before(before, store);
			mem = new_r_Proj(store, mode_M, pn_ia32_Store_M);
		}

		reload = new_bd_ia32_FldCW(NULL, block, frame, noreg, mem);
	}

	set_ia32_op_type(reload, ia32_AddrModeS);
	set_ia32_ls_mode(reload, ia32_mode_fpcw);
	set_ia32_frame_use(reload, IA32_FRAME_USE_32BIT);
	arch_set_irn_register(reload, &ia32_registers[REG_FPCW]);
	sched_add_before(before, reload);
	return reload;
}
Example #3
static ir_node *gcji_get_arraylength(dbg_info *dbgi, ir_node *block,
                                     ir_node *arrayref, ir_node **mem)
{
	ir_node  *addr    = new_r_Member(block, arrayref, gcj_array_length);
	ir_node  *load    = new_rd_Load(dbgi, block, *mem, addr, mode_int,
	                                get_type_for_mode(mode_int), cons_none);
	ir_node  *new_mem = new_r_Proj(load, mode_M, pn_Load_M);
	ir_node  *res     = new_r_Proj(load, mode_int, pn_Load_res);
	*mem = new_mem;
	return res;
}
Example #4
/**
 * lower 64bit addition: a 32bit add for the lower parts, an add with
 * carry for the higher parts. If the carry's value is known, fold it
 * into the upper add.
 */
static void ia32_lower_add64(ir_node *node, ir_mode *mode)
{
	dbg_info     *dbg        = get_irn_dbg_info(node);
	ir_node      *block      = get_nodes_block(node);
	ir_node      *left       = get_Add_left(node);
	ir_node      *right      = get_Add_right(node);
	ir_node      *left_low   = get_lowered_low(left);
	ir_node      *left_high  = get_lowered_high(left);
	ir_node      *right_low  = get_lowered_low(right);
	ir_node      *right_high = get_lowered_high(right);
	ir_mode      *low_mode   = get_irn_mode(left_low);
	ir_mode      *high_mode  = get_irn_mode(left_high);
	carry_result  cr         = lower_add_carry(left, right, low_mode);

	assert(get_irn_mode(left_low)  == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (cr == no_carry) {
		ir_node *add_low  = new_rd_Add(dbg, block, left_low,  right_low, low_mode);
		ir_node *add_high = new_rd_Add(dbg, block, left_high, right_high, high_mode);
		ir_set_dw_lowered(node, add_low, add_high);
	} else if (cr == must_carry && (is_Const(left_high) || is_Const(right_high))) {
		// We cannot assume that left_high and right_high form a normalized Add.
		ir_node *constant;
		ir_node *other;

		if (is_Const(left_high)) {
			constant = left_high;
			other    = right_high;
		} else {
			constant = right_high;
			other    = left_high;
		}

		ir_graph *irg            = get_irn_irg(right_high);
		ir_node  *one            = new_rd_Const(dbg, irg, get_mode_one(high_mode));
		ir_node  *const_plus_one = new_rd_Add(dbg, block, constant, one, high_mode);
		ir_node  *add_high       = new_rd_Add(dbg, block, other, const_plus_one, high_mode);
		ir_node  *add_low        = new_rd_Add(dbg, block, left_low, right_low, low_mode);
		ir_set_dw_lowered(node, add_low, add_high);
	} else {
		/* l_res = a_l + b_l */
		ir_node  *add_low    = new_bd_ia32_l_Add(dbg, block, left_low, right_low);
		ir_mode  *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node  *res_low    = new_r_Proj(add_low, ia32_mode_gp, pn_ia32_l_Add_res);
		ir_node  *flags      = new_r_Proj(add_low, mode_flags, pn_ia32_l_Add_flags);

		/* h_res = a_h + b_h + carry */
		ir_node  *add_high
			= new_bd_ia32_l_Adc(dbg, block, left_high, right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, add_high);
	}
}
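The flags-based fallback above is just a 32-bit add plus an add-with-carry. As a minimal standalone sketch (plain C, not libFirm code; the helper name add64_split is invented for illustration), the same data flow looks like this:

#include <stdint.h>

/* Mirrors the default case of ia32_lower_add64: a low add that produces a
 * carry and a high add that consumes it. */
static uint64_t add64_split(uint32_t a_lo, uint32_t a_hi,
                            uint32_t b_lo, uint32_t b_hi)
{
	uint32_t lo    = a_lo + b_lo;           /* l_res = a_l + b_l */
	uint32_t carry = lo < a_lo ? 1u : 0u;   /* carry out of the low add */
	uint32_t hi    = a_hi + b_hi + carry;   /* h_res = a_h + b_h + carry */
	return ((uint64_t)hi << 32) | lo;
}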
Example #5
/**
 * lower 64bit minus operation
 */
static void ia32_lower_minus64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbg     = get_irn_dbg_info(node);
	ir_node  *block   = get_nodes_block(node);
	ir_node  *op      = get_Minus_op(node);
	ir_node  *op_low  = get_lowered_low(op);
	ir_node  *op_high = get_lowered_high(op);
	ir_node  *minus   = new_bd_ia32_l_Minus64(dbg, block, op_low, op_high);
	ir_node  *l_res   = new_r_Proj(minus, ia32_mode_gp, pn_ia32_Minus64_res_low);
	ir_node  *h_res   = new_r_Proj(minus, mode, pn_ia32_Minus64_res_high);
	ir_set_dw_lowered(node, l_res, h_res);
}
Example #6
/**
 * lower 64bit subtraction: a 32bit sub for the lower parts, a sub
 * with borrow for the higher parts. If the borrow's value is known,
 * fold it into the upper sub.
 */
static void ia32_lower_sub64(ir_node *node, ir_mode *mode)
{
	dbg_info     *dbg        = get_irn_dbg_info(node);
	ir_node      *block      = get_nodes_block(node);
	ir_node      *left       = get_Sub_left(node);
	ir_node      *right      = get_Sub_right(node);
	ir_node      *left_low   = get_lowered_low(left);
	ir_node      *left_high  = get_lowered_high(left);
	ir_node      *right_low  = get_lowered_low(right);
	ir_node      *right_high = get_lowered_high(right);
	ir_mode      *low_mode   = get_irn_mode(left_low);
	ir_mode      *high_mode  = get_irn_mode(left_high);
	carry_result  cr         = lower_sub_borrow(left, right, low_mode);

	assert(get_irn_mode(left_low)  == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (cr == no_carry) {
		ir_node *sub_low  = new_rd_Sub(dbg, block, left_low,  right_low, low_mode);
		ir_node *sub_high = new_rd_Sub(dbg, block, left_high, right_high, high_mode);
		ir_set_dw_lowered(node, sub_low, sub_high);
	} else if (cr == must_carry && (is_Const(left_high) || is_Const(right_high))) {
		ir_node  *sub_high;
		ir_graph *irg        = get_irn_irg(right_high);
		ir_node  *one        = new_rd_Const(dbg, irg, get_mode_one(high_mode));

		if (is_Const(right_high)) {
			ir_node *new_const = new_rd_Add(dbg, block, right_high, one, high_mode);
			sub_high = new_rd_Sub(dbg, block, left_high, new_const, high_mode);
		} else if (is_Const(left_high)) {
			ir_node *new_const = new_rd_Sub(dbg, block, left_high, one, high_mode);
			sub_high = new_rd_Sub(dbg, block, new_const, right_high, high_mode);
		} else {
			panic("logic error");
		}

		ir_node  *sub_low  = new_rd_Sub(dbg, block, left_low, right_low, low_mode);
		ir_set_dw_lowered(node, sub_low, sub_high);
	} else {
		/* l_res = a_l - b_l */
		ir_node  *sub_low    = new_bd_ia32_l_Sub(dbg, block, left_low, right_low);
		ir_mode  *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node  *res_low    = new_r_Proj(sub_low, ia32_mode_gp, pn_ia32_l_Sub_res);
		ir_node  *flags      = new_r_Proj(sub_low, mode_flags, pn_ia32_l_Sub_flags);

		/* h_res = a_h - b_h - carry */
		ir_node  *sub_high
			= new_bd_ia32_l_Sbb(dbg, block, left_high, right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, sub_high);
	}
}
Example #7
ir_node *dmemory_default_get_arraylength(ir_node* objptr, ir_graph *irg, ir_node *block, ir_node **mem)
{
	/* calculate address of arraylength field */
	int       length_len  = get_mode_size_bytes(default_arraylength_mode);
	ir_node  *cnst        = new_r_Const_long(irg, mode_P, -length_len);
	ir_node  *length_addr = new_r_Add(block, objptr, cnst, mode_P);

	ir_node  *cur_mem     = *mem;
	ir_node  *load        = new_r_Load(block, cur_mem, length_addr, default_arraylength_mode, cons_none);
	cur_mem               = new_r_Proj(load, mode_M, pn_Load_M);
	ir_node  *len         = new_r_Proj(load, default_arraylength_mode, pn_Load_res);
	*mem = cur_mem;
	return len;
}
Example #8
ir_node *gcji_allocate_array(ir_type *eltype, ir_node *count)
{
	ir_node *jclass = gcji_get_runtime_classinfo(eltype);
	ir_node *res;
	ir_node *new_mem;
	if (is_Primitive_type(eltype)) {
		ir_node *addr      = new_Address(gcj_new_prim_array_entity);
		ir_node *args[]    = { jclass, count };
		ir_type *call_type = get_entity_type(gcj_new_prim_array_entity);
		ir_node *mem       = get_store();
		ir_node *call      = new_Call(mem, addr, ARRAY_SIZE(args), args,
		                              call_type);
		ir_node *ress      = new_Proj(call, mode_T, pn_Call_T_result);
		new_mem = new_Proj(call, mode_M, pn_Call_M);
		res     = new_r_Proj(ress, mode_reference, 0);
	} else {
		ir_node *addr      = new_Address(gcj_new_object_array_entity);
		ir_node *null      = new_Const(get_mode_null(mode_reference));
		ir_node *args[]    = { count, jclass, null };
		ir_type *call_type = get_entity_type(gcj_new_object_array_entity);
		ir_node *mem       = get_store();
		ir_node *call      = new_Call(mem, addr, ARRAY_SIZE(args), args,
		                              call_type);
		ir_node *ress      = new_Proj(call, mode_T, pn_Call_T_result);
		new_mem = new_Proj(call, mode_M, pn_Call_M);
		res     = new_Proj(ress, mode_reference, 0);
	}

	ir_node *assure_vptr = new_VptrIsSet(new_mem, res, type_jarray);
	ir_node *new_mem2    = new_Proj(assure_vptr, mode_M, pn_VptrIsSet_M);
	ir_node *res2        = new_Proj(assure_vptr, mode_reference,
	                                pn_VptrIsSet_res);
	set_store(new_mem2);
	return res2;
}
Example #9
static ir_node *make_softfloat_call(ir_node *const n, char const *const name,
                                    size_t const arity,
                                    ir_node *const *const in)
{
	dbg_info *const dbgi     = get_irn_dbg_info(n);
	ir_node  *const block    = get_nodes_block(n);
	ir_graph *const irg      = get_irn_irg(n);
	ir_node  *const nomem    = get_irg_no_mem(irg);
	ir_node  *const callee   = create_softfloat_address(n, name);
	ir_type  *const type     = get_softfloat_type(n);
	ir_mode  *const res_mode = get_type_mode(get_method_res_type(type, 0));
	ir_node  *const call     = new_rd_Call(dbgi, block, nomem, callee, arity,
	                                       in, type);
	ir_node  *const results  = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node  *const result   = new_r_Proj(results, res_mode, 0);
	return result;
}
Example #10
ir_node *dmemory_default_alloc_array(ir_type *eltype, ir_node *count, ir_graph *irg, ir_node *block, ir_node **mem)
{
	ir_node *cur_mem      = *mem;

	unsigned count_size   = get_mode_size_bytes(default_arraylength_mode);
	unsigned element_size = is_Class_type(eltype) ? get_mode_size_bytes(mode_P) : get_type_size_bytes(eltype); // FIXME: some langs support arrays of structs.
	/* increase element count so we have enough space for a counter
	 * at the front */
	unsigned add_size     = (element_size + (count_size-1)) / count_size;
	ir_node *count_u      = new_r_Conv(block, count, mode_Iu);
	ir_node *addv         = new_r_Const_long(irg, mode_Iu, add_size);
	ir_node *add1         = new_r_Add(block, count_u, addv, mode_Iu);
	ir_node *elsizev      = new_r_Const_long(irg, mode_Iu, element_size);

	ir_node *size         = new_r_Mul(block, add1, elsizev, mode_Iu);
	unsigned addr_delta   = add_size * element_size;

	symconst_symbol calloc_sym;
	calloc_sym.entity_p   = calloc_entity;
	ir_node *callee       = new_r_SymConst(irg, mode_P, calloc_sym, symconst_addr_ent);

	ir_node *one          = new_r_Const_long(irg, mode_Iu, 1);
	ir_node *in[2]        = { one, size };
	ir_type *call_type    = get_entity_type(calloc_entity);
	ir_node *call         = new_r_Call(block, cur_mem, callee, 2, in, call_type);
	cur_mem               = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node *ress         = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node *res          = new_r_Proj(ress, mode_P, 0);

	/* write length of array */
	ir_node *len_value    = new_r_Conv(block, count, default_arraylength_mode);

	ir_node *len_delta    = new_r_Const_long(irg, mode_P, (int)addr_delta-4); //FIXME: replace magic num
	ir_node *len_addr     = new_r_Add(block, res, len_delta, mode_P);
	ir_node *store        = new_r_Store(block, cur_mem, len_addr, len_value, cons_none);
	cur_mem               = new_r_Proj(store, mode_M, pn_Store_M);

	if (addr_delta > 0) {
		ir_node *delta = new_r_Const_long(irg, mode_P, (int)addr_delta);
		res = new_r_Add(block, res, delta, mode_P);
	}

	*mem = cur_mem;
	return res;
}
Example #11
static void lower64_sub(ir_node *node, ir_mode *mode)
{
	dbg_info *dbgi       = get_irn_dbg_info(node);
	ir_node  *block      = get_nodes_block(node);
	ir_node  *left       = get_Sub_left(node);
	ir_node  *right      = get_Sub_right(node);
	ir_node  *left_low   = get_lowered_low(left);
	ir_node  *left_high  = get_lowered_high(left);
	ir_node  *right_low  = get_lowered_low(right);
	ir_node  *right_high = get_lowered_high(right);
	ir_node  *subs       = new_bd_arm_SubS_t(dbgi, block, left_low, right_low);
	ir_mode  *mode_low   = get_irn_mode(left_low);
	ir_node  *res_low    = new_r_Proj(subs, mode_low, pn_arm_SubS_t_res);
	ir_node  *res_flags  = new_r_Proj(subs, mode_ANY, pn_arm_SubS_t_flags);
	ir_node  *sbc        = new_bd_arm_SbC_t(dbgi, block, left_high,
	                                        right_high, res_flags, mode);
	ir_set_dw_lowered(node, res_low, sbc);
}
Example #12
static void lower64_add(ir_node *node, ir_mode *mode)
{
	dbg_info *dbgi       = get_irn_dbg_info(node);
	ir_node  *block      = get_nodes_block(node);
	ir_node  *left       = get_Add_left(node);
	ir_node  *right      = get_Add_right(node);
	ir_node  *left_low   = get_lowered_low(left);
	ir_node  *left_high  = get_lowered_high(left);
	ir_node  *right_low  = get_lowered_low(right);
	ir_node  *right_high = get_lowered_high(right);
	ir_node  *adds       = new_bd_arm_AddS_t(dbgi, block, left_low, right_low);
	ir_mode  *mode_low   = get_irn_mode(left_low);
	ir_node  *res_low    = new_r_Proj(adds, mode_low, pn_arm_AddS_t_res);
	ir_node  *res_flags  = new_r_Proj(adds, mode_ANY, pn_arm_AddS_t_flags);
	ir_node  *adc        = new_bd_arm_AdC_t(dbgi, block, left_high,
	                                        right_high, res_flags, mode);
	ir_set_dw_lowered(node, res_low, adc);
}
Example #13
static ir_node *gcji_instanceof(ir_node *objptr, ir_type *classtype,
                                ir_graph *irg, ir_node *block, ir_node **mem)
{
	ir_node *jclass    = gcji_get_runtime_classinfo_(block, mem, classtype);
	ir_node *addr      = new_r_Address(irg, gcj_instanceof_entity);
	ir_node *args[]    = { objptr, jclass };
	ir_type *call_type = get_entity_type(gcj_instanceof_entity);
	ir_node *call      = new_r_Call(block, *mem, addr, ARRAY_SIZE(args), args,
	                                call_type);
	ir_node *new_mem   = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node *ress      = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node *call_res  = new_r_Proj(ress, mode_int, 0);
	ir_node *zero      = new_r_Const(irg, get_mode_null(mode_int));
	ir_node *res       = new_r_Cmp(block, call_res, zero,
	                               ir_relation_less_greater);
	*mem = new_mem;
	return res;
}
Example #14
/**
 * Turn a small CopyB node into a series of Load/Store nodes.
 */
static void lower_small_copyb_node(ir_node *irn)
{
	ir_graph      *irg         = get_irn_irg(irn);
	dbg_info      *dbgi        = get_irn_dbg_info(irn);
	ir_node       *block       = get_nodes_block(irn);
	ir_type       *tp          = get_CopyB_type(irn);
	ir_node       *addr_src    = get_CopyB_src(irn);
	ir_node       *addr_dst    = get_CopyB_dst(irn);
	ir_node       *mem         = get_CopyB_mem(irn);
	ir_mode       *mode_ref    = get_irn_mode(addr_src);
	unsigned       mode_bytes  = allow_misalignments ? native_mode_bytes
	                                                 : get_type_alignment(tp);
	unsigned       size        = get_type_size(tp);
	unsigned       offset      = 0;
	bool           is_volatile = get_CopyB_volatility(irn) == volatility_is_volatile;
	ir_cons_flags  flags       = is_volatile ? cons_volatile : cons_none;
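	/* Copy with the widest unit first and halve the access size for the
	 * remainder, e.g. a 7-byte copy with 4-byte native accesses becomes one
	 * 4-byte, one 2-byte and one 1-byte load/store pair. */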

	while (offset < size) {
		ir_mode *mode = get_ir_mode(mode_bytes);
		for (; offset + mode_bytes <= size; offset += mode_bytes) {
			ir_mode *mode_ref_int = get_reference_offset_mode(mode_ref);

			/* construct offset */
			ir_node *addr_const = new_r_Const_long(irg, mode_ref_int, offset);
			ir_node *add        = new_r_Add(block, addr_src, addr_const);

			ir_node *load     = new_rd_Load(dbgi, block, mem, add, mode, tp, flags);
			ir_node *load_res = new_r_Proj(load, mode, pn_Load_res);
			ir_node *load_mem = new_r_Proj(load, mode_M, pn_Load_M);

			ir_node *addr_const2 = new_r_Const_long(irg, mode_ref_int, offset);
			ir_node *add2        = new_r_Add(block, addr_dst, addr_const2);

			ir_node *store     = new_rd_Store(dbgi, block, load_mem, add2, load_res, tp, flags);
			ir_node *store_mem = new_r_Proj(store, mode_M, pn_Store_M);

			mem = store_mem;
		}

		mode_bytes /= 2;
	}

	exchange(irn, mem);
}
Example #15
static void lower64_minus(ir_node *node, ir_mode *mode)
{
	dbg_info *dbgi         = get_irn_dbg_info(node);
	ir_graph *irg          = get_irn_irg(node);
	ir_node  *block        = get_nodes_block(node);
	ir_node  *op           = get_Minus_op(node);
	ir_node  *right_low    = get_lowered_low(op);
	ir_node  *right_high   = get_lowered_high(op);
	ir_mode  *low_unsigned = get_irn_mode(right_low);
	ir_node  *left_low     = new_r_Const_null(irg, low_unsigned);
	ir_node  *left_high    = new_r_Const_null(irg, mode);
	ir_node  *subs         = new_bd_arm_SubS_t(dbgi, block, left_low,
	                                           right_low);
	ir_node  *res_low      = new_r_Proj(subs, low_unsigned, pn_arm_SubS_t_res);
	ir_node  *res_flags    = new_r_Proj(subs, mode_ANY, pn_arm_SubS_t_flags);
	ir_node  *sbc          = new_bd_arm_SbC_t(dbgi, block, left_high,
	                                          right_high, res_flags, mode);
	ir_set_dw_lowered(node, res_low, sbc);
}
Example #16
ir_node *be_get_start_proj(ir_graph *const irg, be_start_info_t *const info)
{
	if (!info->irn) {
		/* This is already the transformed start node. */
		ir_node                     *const start = get_irg_start(irg);
		arch_register_class_t const *const cls   = arch_get_irn_register_req_out(start, info->pos)->cls;
		info->irn = new_r_Proj(start, cls ? cls->mode : mode_M, info->pos);
	}
	return info->irn;
}
Example #17
static ir_node *create_gotpcrel_load(ir_graph  *irg, ir_entity *const entity)
{
	ir_node *const addr
		= be_new_Relocation(irg, X86_IMM_GOTPCREL, entity, mode_P);
	ir_type *const type  = get_entity_type(entity);
	ir_node *const nomem = get_irg_no_mem(irg);
	ir_node *const block = get_irg_start_block(irg);
	ir_node *const load  = new_rd_Load(NULL, block, nomem, addr, mode_P,
									   type, cons_floats);
	return new_r_Proj(load, mode_P, pn_Load_res);
}
Example #18
/** patches Addresses to work in position independent code */
static void fix_pic_addresses(ir_node *const node, void *const data)
{
	(void)data;

	ir_graph      *const irg = get_irn_irg(node);
	be_main_env_t *const be  = be_get_irg_main_env(irg);
	foreach_irn_in(node, i, pred) {
		if (!is_Address(pred))
			continue;

		ir_node         *res;
		ir_entity *const entity = get_Address_entity(pred);
		dbg_info  *const dbgi   = get_irn_dbg_info(pred);
		if (i == n_Call_ptr && is_Call(node)) {
			/* Calls can jump to relative addresses, so we can directly jump to
			 * the (relatively) known call address or the trampoline */
			if (can_address_relative(entity))
				continue;

			ir_entity *const trampoline = get_trampoline(be, entity);
			res = new_rd_Address(dbgi, irg, trampoline);
		} else if (get_entity_type(entity) == get_code_type()) {
			/* Block labels can always be addressed directly. */
			continue;
		} else {
			/* Everything else is accessed relative to EIP. */
			ir_node *const block    = get_nodes_block(pred);
			ir_mode *const mode     = get_irn_mode(pred);
			ir_node *const pic_base = ia32_get_pic_base(irg);

			if (can_address_relative(entity)) {
				/* All ok now for locally constructed stuff. */
				res = new_rd_Add(dbgi, block, pic_base, pred, mode);
				/* Make sure the walker doesn't visit this add again. */
				mark_irn_visited(res);
			} else {
				/* Get entry from pic symbol segment. */
				ir_entity *const pic_symbol  = get_pic_symbol(be, entity);
				ir_node   *const pic_address = new_rd_Address(dbgi, irg, pic_symbol);
				ir_node   *const add         = new_rd_Add(dbgi, block, pic_base, pic_address, mode);
				mark_irn_visited(add);

				/* We need an extra indirection for global data outside our current
				 * module. The loads are always safe and can therefore float and
				 * need no memory input */
				ir_type *const type  = get_entity_type(entity);
				ir_node *const nomem = get_irg_no_mem(irg);
				ir_node *const load  = new_rd_Load(dbgi, block, nomem, add, mode, type, cons_floats);
				res = new_r_Proj(load, mode, pn_Load_res);
			}
		}
		set_irn_n(node, i, res);
	}
}
Example #19
ir_graph *new_ir_graph(ir_entity *ent, int n_loc)
{
	/* We cannot create graphs before setting mode_P. */
	assert(mode_P != NULL && "mode_P is not set (target not initialized?)");

	ir_graph *res = new_r_ir_graph(ent, n_loc);

	ir_node *const start = new_r_Start(res);
	set_irg_start(res, start);

	/* Proj results of start node */
	set_irg_frame(res, new_r_Proj(start, mode_P, pn_Start_P_frame_base));
	set_irg_args(res, new_r_Proj(start, mode_T, pn_Start_T_args));
	ir_node *const initial_mem = new_r_Proj(start, mode_M, pn_Start_M);
	set_irg_initial_mem(res, initial_mem);

	set_r_store(res, initial_mem);

	add_irp_irg(res);
	return res;
}
Example #20
ir_node *dmemory_default_alloc_object(ir_type *type, ir_graph *irg, ir_node *block, ir_node **mem)
{
	ir_node  *cur_mem = *mem;
	symconst_symbol type_sym;
	type_sym.type_p = type;
	ir_node  *size = new_r_SymConst(irg, mode_Iu, type_sym, symconst_type_size);

	symconst_symbol calloc_sym;
	calloc_sym.entity_p = calloc_entity;
	ir_node *callee = new_r_SymConst(irg, mode_P, calloc_sym, symconst_addr_ent);

	ir_node *one       = new_r_Const_long(irg, mode_Iu, 1);
	ir_node *in[2]     = { one, size };
	ir_type *call_type = get_entity_type(calloc_entity);
	ir_node *call      = new_r_Call(block, cur_mem, callee, 2, in, call_type);
	cur_mem            = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node *ress      = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node *res       = new_r_Proj(ress, mode_P, 0);

	*mem = cur_mem;
	return res;
}
Example #21
static ir_node *arm_new_reload(ir_node *value, ir_node *spill, ir_node *before)
{
	ir_node  *block  = get_block(before);
	ir_graph *irg    = get_irn_irg(before);
	ir_node  *frame  = get_irg_frame(irg);
	ir_mode  *mode   = get_irn_mode(value);
	ir_node  *load   = new_bd_arm_Ldr(NULL, block, frame, spill, mode, NULL,
	                                  false, 0, true);
	ir_node  *proj   = new_r_Proj(load, mode, pn_arm_Ldr_res);
	arch_add_irn_flags(load, arch_irn_flag_reload);
	sched_add_before(before, load);
	return proj;
}
Example #22
void be_default_lower_va_arg(ir_node *const node, bool const compound_is_ptr,
                             unsigned const stack_param_align)
{
	ir_node  *block = get_nodes_block(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_graph *irg   = get_irn_irg(node);

	ir_type       *aptype   = get_method_res_type(get_Builtin_type(node), 0);
	ir_node *const ap       = get_irn_n(node, 1);
	ir_node *const node_mem = get_Builtin_mem(node);

	ir_mode *apmode = get_type_mode(aptype);
	ir_node *res;
	ir_node *new_mem;
	if (apmode) {
		goto load;
	} else if (compound_is_ptr) {
		apmode = mode_P;
		aptype = get_type_for_mode(apmode);
load:;
		ir_node *const load = new_rd_Load(dbgi, block, node_mem, ap, apmode, aptype, cons_none);
		res     = new_r_Proj(load, apmode, pn_Load_res);
		new_mem = new_r_Proj(load, mode_M, pn_Load_M);
	} else {
		/* aptype has no associated mode, so it is represented as a pointer. */
		res     = ap;
		new_mem = node_mem;
	}

	unsigned const round_up    = round_up2(get_type_size(aptype),
	                                       stack_param_align);
	ir_mode *const offset_mode = get_reference_offset_mode(mode_P);
	ir_node *const offset      = new_r_Const_long(irg, offset_mode, round_up);
	ir_node *const new_ap      = new_rd_Add(dbgi, block, ap, offset);

	ir_node *const in[] = { new_mem, res, new_ap };
	turn_into_tuple(node, ARRAY_SIZE(in), in);
}
Example #23
static void lower64_mul(ir_node *node, ir_mode *mode)
{
	dbg_info *dbgi       = get_irn_dbg_info(node);
	ir_node  *block      = get_nodes_block(node);
	ir_node  *left       = get_Mul_left(node);
	ir_node  *right      = get_Mul_right(node);
	ir_node  *left_low   = get_lowered_low(left);
	ir_node  *left_high  = get_lowered_high(left);
	ir_node  *right_low  = get_lowered_low(right);
	ir_node  *right_high = get_lowered_high(right);
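	/* The 64x64->64 bit product decomposes as
	 *   (l_h*2^32 + l_l) * (r_h*2^32 + r_l) mod 2^64
	 *     = ((l_l*r_h + l_h*r_l + high32(l_l*r_l)) << 32) + low32(l_l*r_l);
	 * UMulL delivers the full 64-bit product l_l*r_l, the two cross products
	 * only contribute to the high word. */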
	ir_node  *conv_l_low = new_rd_Conv(dbgi, block, left_low, mode);
	ir_node  *mul1       = new_rd_Mul(dbgi, block, conv_l_low, right_high,
	                                  mode);
	ir_node  *umull      = new_bd_arm_UMulL_t(dbgi, block, left_low, right_low);
	ir_mode  *umode      = get_irn_mode(right_low);
	ir_node  *umull_low  = new_r_Proj(umull, umode, pn_arm_UMulL_t_low);
	ir_node  *umull_high = new_r_Proj(umull, mode, pn_arm_UMulL_t_high);
	ir_node  *conv_r_low = new_rd_Conv(dbgi, block, right_low, mode);
	ir_node  *mul2       = new_rd_Mul(dbgi, block, conv_r_low, left_high, mode);
	ir_node  *add1       = new_rd_Add(dbgi, block, mul2, mul1, mode);
	ir_node  *add2       = new_rd_Add(dbgi, block, add1, umull_high, mode);
	ir_set_dw_lowered(node, umull_low, add2);
}
Example #24
ir_node *gcji_lookup_interface(ir_node *objptr, ir_type *iface,
                               ir_entity *method, ir_graph *irg,
                               ir_node *block, ir_node **mem)
{
	ir_node   *cur_mem       = *mem;

	// we need the reference to the object's class$ field
	// first, dereference the vptr in order to get the vtable address.
	ir_entity *vptr_entity   = get_vptr_entity();
	ir_type   *vptr_type     = get_entity_type(vptr_entity);
	ir_node   *vptr_addr     = new_r_Member(block, objptr, vptr_entity);
	ir_node   *vptr_load     = new_r_Load(block, cur_mem, vptr_addr, mode_reference, vptr_type, cons_none);
	ir_node   *vtable_addr   = new_r_Proj(vptr_load, mode_reference, pn_Load_res);
	           cur_mem       = new_r_Proj(vptr_load, mode_M, pn_Load_M);

	// second, dereference vtable_addr (it points to the slot where the address of the class$ field is stored).
	ir_node   *cd_load       = new_r_Load(block, cur_mem, vtable_addr, mode_reference, vptr_type, cons_none);
	ir_node   *cd_ref        = new_r_Proj(cd_load, mode_reference, pn_Load_res);
	           cur_mem       = new_r_Proj(cd_load, mode_M, pn_Load_M);

	class_t   *linked_class  = (class_t*)  oo_get_type_link(iface);
	method_t  *linked_method = (method_t*) oo_get_entity_link(method);
	assert(linked_class && linked_method);

	constant_t *name_const   = linked_class->constants[linked_method->name_index];
	ir_entity *name_const_ent = gcji_emit_utf8_const(name_const, 1);
	ir_node   *name_ref      = new_r_Address(irg, name_const_ent);

	constant_t *desc_const   = linked_class->constants[linked_method->descriptor_index];
	ir_entity *desc_const_ent = gcji_emit_utf8_const(desc_const, 1);
	ir_node   *desc_ref      = new_r_Address(irg, desc_const_ent);

	ir_node   *callee        = new_r_Address(irg, gcj_lookup_interface_entity);

	ir_node   *args[3]       = { cd_ref, name_ref, desc_ref };
	ir_type   *call_type     = get_entity_type(gcj_lookup_interface_entity);
	ir_node   *call          = new_r_Call(block, cur_mem, callee, 3, args, call_type);
	           cur_mem       = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node   *ress          = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node   *res           = new_r_Proj(ress, mode_reference, 0);

	*mem = cur_mem;

	return res;
}
Example #25
/**
 * Turn a large CopyB node into a memcpy call.
 */
static void lower_large_copyb_node(ir_node *irn)
{
	ir_graph *irg      = get_irn_irg(irn);
	ir_node  *block    = get_nodes_block(irn);
	dbg_info *dbgi     = get_irn_dbg_info(irn);
	ir_node  *mem      = get_CopyB_mem(irn);
	ir_node  *addr_src = get_CopyB_src(irn);
	ir_node  *addr_dst = get_CopyB_dst(irn);
	ir_type  *copyb_tp = get_CopyB_type(irn);
	unsigned  size     = get_type_size(copyb_tp);

	ir_node  *callee      = get_memcpy_address(irg);
	ir_type  *call_tp     = get_memcpy_methodtype();
	ir_mode  *mode_size_t = get_ir_mode(native_mode_bytes);
	ir_node  *size_cnst   = new_r_Const_long(irg, mode_size_t, size);
	ir_node  *in[]        = { addr_dst, addr_src, size_cnst };
	ir_node  *call        = new_rd_Call(dbgi, block, mem, callee, ARRAY_SIZE(in), in, call_tp);
	ir_node  *call_mem    = new_r_Proj(call, mode_M, pn_Call_M);

	exchange(irn, call_mem);
}
Example #26
static void transform_Proj_Alloc(ir_node *node)
{
	/* we might need a result adjustment */
	if (addr_delta == 0)
		return;
	if (get_Proj_proj(node) != pn_Alloc_res)
		return;
	if (ir_nodeset_contains(&transformed, node))
		return;

	ir_node  *const alloc = get_Proj_pred(node);
	dbg_info *const dbgi  = get_irn_dbg_info(alloc);
	ir_graph *const irg   = get_irn_irg(node);
	ir_node  *const block = get_nodes_block(node);
	ir_node  *const delta = new_r_Const_long(irg, mode_P, addr_delta);
	ir_node  *const dummy = new_r_Dummy(irg, mode_P);
	ir_node  *const add   = new_rd_Add(dbgi, block, dummy, delta, mode_P);

	exchange(node, add);
	ir_node *const new_proj = new_r_Proj(alloc, mode_P, pn_Alloc_res);
	set_Add_left(add, new_proj);
	ir_nodeset_insert(&transformed, new_proj);
}
Example #27
/**
 * implementation of create_set_func which produces a cond with control
 * flow
 */
static ir_node *create_cond_set(ir_node *cond_value, ir_mode *dest_mode)
{
	ir_node *lower_block = part_block_edges(cond_value);
	ir_node *upper_block = get_nodes_block(cond_value);
	foreach_out_edge_safe(upper_block, edge) {
		/* The cached nodes might belong to the lower block, so we have
		 * to clear the cache for moved nodes to avoid dominance problems. */
		ir_node *node = get_edge_src_irn(edge);
		set_irn_link(node, NULL);
	}
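	/* Build a control-flow diamond: upper_block ends in a Cond on cond_value,
	 * its true/false projections jump through two fresh blocks into
	 * lower_block, where a Phi selects 1 or 0 in dest_mode. */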
	ir_graph *irg         = get_irn_irg(cond_value);
	ir_node  *cond        = new_r_Cond(upper_block, cond_value);
	ir_node  *proj_true   = new_r_Proj(cond, mode_X, pn_Cond_true);
	ir_node  *proj_false  = new_r_Proj(cond, mode_X, pn_Cond_false);
	ir_node  *in_true[1]  = { proj_true };
	ir_node  *in_false[1] = { proj_false };
	ir_node  *true_block  = new_r_Block(irg, ARRAY_SIZE(in_true), in_true);
	ir_node  *false_block = new_r_Block(irg, ARRAY_SIZE(in_false),in_false);
	ir_node  *true_jmp    = new_r_Jmp(true_block);
	ir_node  *false_jmp   = new_r_Jmp(false_block);
	ir_node  *lower_in[2] = { true_jmp, false_jmp };
	ir_node  *one         = new_r_Const_one(irg, dest_mode);
	ir_node  *zero        = new_r_Const_null(irg, dest_mode);
	ir_node  *phi_in[2]   = { one, zero };

	set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
	ir_node *phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in, dest_mode);
	return phi;
}
Example #28
/**
 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
 */
static void lower_sel(ir_node *sel)
{
	ir_graph  *irg   = get_irn_irg(sel);
	ir_entity *ent   = get_Sel_entity(sel);
	ir_type   *owner = get_entity_owner(ent);
	dbg_info  *dbg   = get_irn_dbg_info(sel);
	ir_mode   *mode  = get_irn_mode(sel);
	ir_node   *bl    = get_nodes_block(sel);
	ir_node   *newn;

	/* we can only replace Sels when the layout of the owner type is decided. */
	if (get_type_state(owner) != layout_fixed)
		return;

	if (0 < get_Sel_n_indexs(sel)) {
		/* an Array access */
		ir_type *basetyp = get_entity_type(ent);
		ir_mode *basemode;
		ir_node *index;
		if (is_Primitive_type(basetyp))
			basemode = get_type_mode(basetyp);
		else
			basemode = mode_P_data;

		assert(basemode && "no mode for lowering Sel");
		assert((get_mode_size_bits(basemode) % 8 == 0) && "can not deal with unorthodox modes");
		index = get_Sel_index(sel, 0);

		if (is_Array_type(owner)) {
			ir_type *arr_ty = owner;
			size_t   dims   = get_array_n_dimensions(arr_ty);
			size_t  *map    = ALLOCAN(size_t, dims);
			ir_mode *mode_Int = get_reference_mode_signed_eq(mode);
			ir_tarval *tv;
			ir_node *last_size;
			size_t   i;

			assert(dims == (size_t)get_Sel_n_indexs(sel)
				&& "array dimension must match number of indices of Sel node");

			for (i = 0; i < dims; i++) {
				size_t order = get_array_order(arr_ty, i);

				assert(order < dims &&
					"order of a dimension must be smaller than the arrays dim");
				map[order] = i;
			}
			newn = get_Sel_ptr(sel);

			/* Size of the array element */
			tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
			last_size = new_rd_Const(dbg, irg, tv);

			/*
			 * We compute the offset part of dimension d_i recursively
			 * with the offset part of dimension d_{i-1}
			 *
			 *     off_0 = sizeof(array_element_type);
			 *     off_i = (u_i - l_i) * off_{i-1}  ; i >= 1
			 *
			 * whereas u_i is the upper bound of the current dimension
			 * and l_i the lower bound of the current dimension.
			 */
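			/* Worked illustration: for int a[2][3] with 4-byte elements and
			 * zero lower bounds, off_0 = 4 and off_1 = (3 - 0) * 4 = 12,
			 * so the address of a[i][j] is ptr + i*12 + j*4. */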
			for (i = dims; i > 0;) {
				size_t dim = map[--i];
				ir_node *lb, *ub, *elms, *n, *ind;

				elms = NULL;
				lb = get_array_lower_bound(arr_ty, dim);
				ub = get_array_upper_bound(arr_ty, dim);

				if (! is_Unknown(lb))
					lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
				else
					lb = NULL;

				if (! is_Unknown(ub))
					ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
				else
					ub = NULL;

				/*
				 * If the array has more than one dimension, lower and upper
				 * bounds have to be set in the non-last dimension.
				 */
				if (i > 0) {
					assert(lb != NULL && "lower bound has to be set in multi-dim array");
					assert(ub != NULL && "upper bound has to be set in multi-dim array");

					/* Elements in one Dimension */
					elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
				}

				ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);

				/*
				 * Normalize the index: if a lower bound is set, subtract it,
				 * so we can assume a lower bound of 0.
				 */
				if (lb != NULL)
					ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);

				n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);

				/*
				 * see comment above.
				 */
				if (i > 0)
					last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);

				newn = new_rd_Add(dbg, bl, newn, n, mode);
			}
		} else {
			/* no array type */
			ir_mode   *idx_mode = get_irn_mode(index);
			ir_tarval *tv       = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);

			newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
				new_rd_Mul(dbg, bl, index,
				new_r_Const(irg, tv),
				idx_mode),
				mode);
		}
	} else if (is_Method_type(get_entity_type(ent)) && is_Class_type(owner)) {
		/* We need an additional load when accessing methods from a dispatch
		 * table.
		 * Matze TODO: Is this really still used? At least liboo does its own
		 * lowering of Method-Sels...
		 */
		ir_mode   *ent_mode = get_type_mode(get_entity_type(ent));
		int        offset   = get_entity_offset(ent);
		ir_mode   *mode_Int = get_reference_mode_signed_eq(mode);
		ir_tarval *tv       = new_tarval_from_long(offset, mode_Int);
		ir_node   *cnst     = new_rd_Const(dbg, irg, tv);
		ir_node   *add      = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
		ir_node   *mem      = get_Sel_mem(sel);
		newn = new_rd_Load(dbg, bl, mem, add, ent_mode, cons_none);
		newn = new_r_Proj(newn, ent_mode, pn_Load_res);
	} else {
		int offset = get_entity_offset(ent);

		/* replace Sel by add(obj, const(ent.offset)) */
		newn = get_Sel_ptr(sel);
		if (offset != 0) {
			ir_mode   *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_tarval *tv        = new_tarval_from_long(offset, mode_UInt);
			ir_node   *cnst      = new_r_Const(irg, tv);
			newn = new_rd_Add(dbg, bl, newn, cnst, mode);
		}
	}

	/* run the hooks */
	hook_lower(sel);

	exchange(sel, newn);
}
Example #29
static void lower_divmod(ir_node *node, ir_node *left, ir_node *right,
                         ir_node *mem, ir_mode *mode, int res_offset)
{
	dbg_info  *dbgi       = get_irn_dbg_info(node);
	ir_node   *block      = get_nodes_block(node);
	ir_node   *left_low   = get_lowered_low(left);
	ir_node   *left_high  = get_lowered_high(left);
	ir_node   *right_low  = get_lowered_low(right);
	ir_node   *right_high = get_lowered_high(right);
	ir_mode   *node_mode  = get_irn_mode(left);
	ir_entity *entity     = mode_is_signed(node_mode) ? ldivmod : uldivmod;
	ir_type   *mtp        = get_entity_type(entity);
	ir_graph  *irg        = get_irn_irg(node);
	ir_node   *addr       = new_r_Address(irg, entity);
	ir_node   *in[4];
	if (arm_cg_config.big_endian) {
		in[0] = left_high;
		in[1] = left_low;
		in[2] = right_high;
		in[3] = right_low;
	} else {
		in[0] = left_low;
		in[1] = left_high;
		in[2] = right_low;
		in[3] = right_high;
	}
	ir_node *call    = new_rd_Call(dbgi, block, mem, addr, ARRAY_SIZE(in), in,
	                               mtp);
	ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
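	/* The helper presumably returns quotient and remainder as two 64-bit
	 * values, i.e. four 32-bit result projections; res_offset selects the
	 * pair belonging to this node (e.g. 0 for Div, 2 for Mod). */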
	set_irn_pinned(call, get_irn_pinned(node));
	foreach_out_edge_safe(node, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

		switch ((pn_Div)get_Proj_num(proj)) {
		case pn_Div_M:
			/* reroute to the call */
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_M);
			break;
		case pn_Div_X_regular:
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_X_regular);
			break;
		case pn_Div_X_except:
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_X_except);
			break;
		case pn_Div_res: {
			ir_mode *low_mode = get_irn_mode(left_low);
			if (arm_cg_config.big_endian) {
				ir_node *res_low  = new_r_Proj(resproj, low_mode, res_offset+1);
				ir_node *res_high = new_r_Proj(resproj, mode,     res_offset);
				ir_set_dw_lowered(proj, res_low, res_high);
			} else {
				ir_node *res_low  = new_r_Proj(resproj, low_mode, res_offset);
				ir_node *res_high = new_r_Proj(resproj, mode,     res_offset+1);
				ir_set_dw_lowered(proj, res_low, res_high);
			}
			break;
		}
		}
		/* mark this proj: we have handled it already, otherwise we might fall
		 * into our new nodes. */
		mark_irn_visited(proj);
	}
}
Example #30
/**
 * lower 64bit conversions
 */
static void ia32_lower_conv64(ir_node *node, ir_mode *mode)
{
	dbg_info  *dbg       = get_irn_dbg_info(node);
	ir_node   *op        = get_Conv_op(node);
	ir_mode   *mode_from = get_irn_mode(op);
	ir_mode   *mode_to   = get_irn_mode(node);

	if (mode_is_float(mode_from) && get_mode_size_bits(mode_to) == 64
	    && get_mode_arithmetic(mode_to) == irma_twos_complement) {
		/* We have a Conv float -> long long here */
		ir_node *float_to_ll;
		ir_node *l_res;
		ir_node *h_res;
		if (mode_is_signed(mode)) {
			/* convert from float to signed 64bit */
			ir_node *block = get_nodes_block(node);
			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, block, op);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
			                   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
							   pn_ia32_l_FloattoLL_res_high);
		} else {
			/* Convert from float to unsigned 64bit. */
			ir_graph  *irg = get_irn_irg(node);
			ir_tarval *flt_tv
				= new_tarval_from_str("9223372036854775808", 19, x86_mode_E);
			ir_node   *flt_corr  = new_r_Const(irg, flt_tv);
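			/* flt_corr is 2^63: values >= 2^63 do not fit the signed
			 * FloattoLL conversion, so they are reduced by 2^63 first and the
			 * bit is re-added to the high word via the 0x80000000 Phi. */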

			ir_node *lower_blk = part_block_dw(node);
			ir_node *upper_blk = get_nodes_block(node);
			set_dw_control_flow_changed();

			ir_node *opc  = new_rd_Conv(dbg, upper_blk, op, x86_mode_E);
			ir_node *cmp  = new_rd_Cmp(dbg, upper_blk, opc, flt_corr,
			                           ir_relation_less);
			ir_node *cond = new_rd_Cond(dbg, upper_blk, cmp);
			ir_node *in[] = {
				new_r_Proj(cond, mode_X, pn_Cond_true),
				new_r_Proj(cond, mode_X, pn_Cond_false)
			};
			ir_node *blk   = new_r_Block(irg, 1, &in[1]);
			in[1] = new_r_Jmp(blk);

			set_irn_in(lower_blk, 2, in);

			/* create two Phis */
			ir_node *phi_in[] = {
				new_r_Const_null(irg, mode),
				new_r_Const_long(irg, mode, 0x80000000)
			};
			ir_node *int_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(phi_in), phi_in, mode);

			ir_node *fphi_in[] = {
				opc,
				new_rd_Sub(dbg, upper_blk, opc, flt_corr, x86_mode_E)
			};
			ir_node *flt_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(fphi_in), fphi_in,
				            x86_mode_E);

			/* fix Phi links for next part_block() */
			if (is_Phi(int_phi))
				add_Block_phi(lower_blk, int_phi);
			if (is_Phi(flt_phi))
				add_Block_phi(lower_blk, flt_phi);

			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, lower_blk, flt_phi);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
							   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
							   pn_ia32_l_FloattoLL_res_high);
			h_res = new_rd_Add(dbg, lower_blk, h_res, int_phi, mode);

			/* move the call and its Proj's to the lower block */
			set_nodes_block(node, lower_blk);
			for (ir_node *proj = (ir_node*)get_irn_link(node); proj != NULL;
			     proj = (ir_node*)get_irn_link(proj)) {
				set_nodes_block(proj, lower_blk);
			}
		}
		ir_set_dw_lowered(node, l_res, h_res);
	} else if (get_mode_size_bits(mode_from) == 64
	           && get_mode_arithmetic(mode_from) == irma_twos_complement
	           && mode_is_float(mode_to)) {
		/* We have a Conv long long -> float here */
		ir_node *op_low  = get_lowered_low(op);
		ir_node *op_high = get_lowered_high(op);
		ir_node *block   = get_nodes_block(node);
		ir_node *ll_to_float
			= new_bd_ia32_l_LLtoFloat(dbg, block, op_high, op_low, mode_to);

		exchange(node, ll_to_float);
	} else {
		ir_default_lower_dw_Conv(node, mode);
	}
}