static ir_node *create_fpu_mode_reload(void *const env, ir_node *const state,
                                       ir_node *const spill,
                                       ir_node *const before,
                                       ir_node *const last_state)
{
	(void)env;
	(void)state;
	ir_node        *reload;
	ir_node  *const block = get_nodes_block(before);
	ir_graph *const irg   = get_irn_irg(block);
	ir_node  *const noreg = ia32_new_NoReg_gp(irg);
	ir_node  *const nomem = get_irg_no_mem(irg);
	if (ia32_cg_config.use_unsafe_floatconv) {
		reload = new_bd_ia32_FldCW(NULL, block, noreg, noreg, nomem);
		/* Load the control word from a static constant: 0x37F is the
		 * default word (RC = 00, round to nearest), 0xC7F has RC = 11
		 * (truncate). */
		ir_entity *const rounding_mode = spill
			? create_ent(&fpcw_round,    0x37F, "_fpcw_round")
			: create_ent(&fpcw_truncate, 0xC7F, "_fpcw_truncate");
		set_ia32_am_ent(reload, rounding_mode);
	} else {
		ir_node *mem;
		ir_node *const frame = get_irg_frame(irg);
		if (spill) {
			mem = spill;
		} else {
			assert(last_state);
			ir_node *const cwstore = create_fnstcw(block, frame, noreg, nomem, last_state);
			sched_add_before(before, cwstore);

			ir_node *const load = new_bd_ia32_Load(NULL, block, frame, noreg, cwstore);
			set_ia32_op_type(load, ia32_AddrModeS);
			set_ia32_ls_mode(load, mode_Hu);
			set_ia32_frame_use(load, IA32_FRAME_USE_32BIT);
			sched_add_before(before, load);

			ir_node *const load_res = new_r_Proj(load, ia32_mode_gp, pn_ia32_Load_res);

			/* TODO: Make the actual mode configurable in ChangeCW. */
			ir_node *const or_const = ia32_create_Immediate(irg, 0xC00);
			ir_node *const orn      = new_bd_ia32_Or(NULL, block, noreg, noreg, nomem, load_res, or_const);
			sched_add_before(before, orn);

			ir_node *const store = new_bd_ia32_Store(NULL, block, frame, noreg, nomem, orn);
			set_ia32_op_type(store, ia32_AddrModeD);
			/* Use ia32_mode_gp, as movl has a shorter opcode than movw. */
			set_ia32_ls_mode(store, ia32_mode_gp);
			set_ia32_frame_use(store, IA32_FRAME_USE_32BIT);
			sched_add_before(before, store);

			mem = new_r_Proj(store, mode_M, pn_ia32_Store_M);
		}
		reload = new_bd_ia32_FldCW(NULL, block, frame, noreg, mem);
	}
	set_ia32_op_type(reload, ia32_AddrModeS);
	set_ia32_ls_mode(reload, ia32_mode_fpcw);
	set_ia32_frame_use(reload, IA32_FRAME_USE_32BIT);
	arch_set_irn_register(reload, &ia32_registers[REG_FPCW]);
	sched_add_before(before, reload);
	return reload;
}
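/*
 * Worked example (illustrative only, not part of this file): the run-time
 * arithmetic performed by the Or node above.  RC is bits 10-11 of the x87
 * control word; OR-ing in 0xC00 switches any rounding mode to truncate
 * (RC = 11) while leaving the exception masks and precision control
 * untouched.  The helper name is hypothetical.
 */
static inline unsigned example_cw_to_truncate(unsigned const cw)
{
	/* e.g. 0x037F (round to nearest) | 0x0C00 == 0x0F7F (truncate) */
	return cw | 0xC00;
}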
ir_node *be_new_Copy_before_reg(ir_node *const val, ir_node *const before,
                                arch_register_t const *const reg)
{
	ir_node *const block = get_nodes_block(before);
	ir_node *const copy  = be_new_Copy(block, val);
	sched_add_before(before, copy);
	arch_set_irn_register_out(copy, 0, reg);
	return copy;
}
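/*
 * Usage sketch (hypothetical caller, for illustration): pin a value into a
 * fixed register right before its user, e.g. to satisfy an instruction that
 * demands a specific register.  REG_EAX is only an example choice; a real
 * caller would be a constraint handling pass.
 */
static ir_node *example_pin_to_eax(ir_node *const val, ir_node *const user)
{
	return be_new_Copy_before_reg(val, user, &ia32_registers[REG_EAX]);
}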
static ir_node *arm_new_reload(ir_node *value, ir_node *spill, ir_node *before)
{
	ir_node  *block = get_block(before);
	ir_graph *irg   = get_irn_irg(before);
	ir_node  *frame = get_irg_frame(irg);
	ir_mode  *mode  = get_irn_mode(value);
	ir_node  *load  = new_bd_arm_Ldr(NULL, block, frame, spill, mode, NULL,
	                                 false, 0, true);
	ir_node  *proj  = new_r_Proj(load, mode, pn_arm_Ldr_res);
	arch_add_irn_flags(load, arch_irn_flag_reload);
	sched_add_before(before, load);
	return proj;
}
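/*
 * Context sketch (hedged: this mirrors libFirm's regalloc_if_t callback
 * table as used by the backends; the cost numbers are placeholders, and a
 * matching arm_new_spill counterpart is assumed but not shown here).  The
 * register allocator never calls arm_new_reload directly; the backend hands
 * it over as the new_reload hook, roughly like this:
 *
 *     static regalloc_if_t const arm_regalloc_if = {
 *         .spill_cost  = 7,
 *         .reload_cost = 5,
 *         .new_spill   = arm_new_spill,
 *         .new_reload  = arm_new_reload,
 *     };
 */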
static void introduce_epilog(ir_node *ret)
{
	arch_register_t const *const sp_reg = &arm_registers[REG_SP];
	assert(arch_get_irn_register_req_in(ret, n_arm_Return_sp) == sp_reg->single_req);

	ir_node  *const sp         = get_irn_n(ret, n_arm_Return_sp);
	ir_node  *const block      = get_nodes_block(ret);
	ir_graph *const irg        = get_irn_irg(ret);
	ir_type  *const frame_type = get_irg_frame_type(irg);
	unsigned  const frame_size = get_type_size_bytes(frame_type);
	/* Release the stack frame: a negative IncSP offset shrinks it. */
	ir_node  *const incsp      = be_new_IncSP(sp_reg, block, sp,
	                                          -(int)frame_size, 0);
	set_irn_n(ret, n_arm_Return_sp, incsp);
	sched_add_before(ret, incsp);
}
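/*
 * Illustration (hypothetical helper): be_new_IncSP follows the convention
 * that a positive offset expands the stack frame and a negative one releases
 * it, so the epilog above exactly undoes a prolog IncSP of +frame_size.
 */
static ir_node *example_grow_frame(ir_node *const sp, ir_node *const block,
                                   unsigned const frame_size)
{
	/* +frame_size allocates the frame; the epilog uses -frame_size. */
	return be_new_IncSP(&arm_registers[REG_SP], block, sp, (int)frame_size, 0);
}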
/**
 * Transforms a Sub to a Neg + Add, which subsequently allows swapping
 * of the inputs. The swapping is also (implicitly) done here.
 */
static void transform_sub_to_neg_add(ir_node *node,
                                     const arch_register_t *out_reg)
{
	ir_node  *block = get_nodes_block(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);

	ir_node *in1 = get_irn_n(node, 0);
	ir_node *in2 = get_irn_n(node, 1);
	const arch_register_t *in2_reg = arch_get_irn_register(in2);

	const amd64_binop_addr_attr_t *attr = get_amd64_binop_addr_attr(node);
	ir_node *add;
	unsigned pos;
	if (is_amd64_subs(node)) {
		/* SSE has no float negate: negate in2 by XOR-ing it with a
		 * constant that has only the sign bit of the value size set. */
		unsigned   bits = x86_bytes_from_size(attr->base.base.size) * 8;
		ir_tarval *tv   = get_mode_one(amd64_mode_xmm);
		tv = tarval_shl_unsigned(tv, bits - 1);
		ir_entity *sign_bit_const = create_float_const_entity(tv);

		amd64_binop_addr_attr_t xor_attr = {
			.base = {
				.base = {
					.op_mode = AMD64_OP_REG_ADDR,
					.size    = X86_SIZE_64,
				},
			},
		};
		init_lconst_addr(&xor_attr.base.addr, sign_bit_const);

		ir_node *xor_in[] = { in2 };
		ir_node *const xor = new_bd_amd64_xorp(dbgi, block, ARRAY_SIZE(xor_in),
		                                       xor_in, amd64_xmm_reqs, &xor_attr);
		sched_add_before(node, xor);
		ir_node *const neg = be_new_Proj_reg(xor, pn_amd64_xorp_res, in2_reg);

		ir_node *in[] = { neg, in1 };
		add = new_bd_amd64_adds(dbgi, block, ARRAY_SIZE(in), in,
		                        amd64_xmm_xmm_reqs, attr);
		pos = pn_amd64_adds_res;
	} else {