void dmemory_lower_Alloc(ir_node *node)
{
	assert(is_Alloc(node));

	if (get_Alloc_where(node) != heap_alloc)
		return;

	ir_graph *irg     = get_irn_irg(node);
	ir_type  *type    = get_Alloc_type(node);
	ir_node  *count   = get_Alloc_count(node);
	ir_node  *res     = NULL;
	ir_node  *cur_mem = get_Alloc_mem(node);
	ir_node  *block   = get_nodes_block(node);

	if (is_Class_type(type)) {
		res = (*dmemory_model.alloc_object)(type, irg, block, &cur_mem);
		ddispatch_prepare_new_instance(type, res, irg, block, &cur_mem);
	} else if (is_Array_type(type)) {
		ir_type *eltype = get_array_element_type(type);
		res = (*dmemory_model.alloc_array)(eltype, count, irg, block, &cur_mem);
	} else {
		assert(0);
	}

	turn_into_tuple(node, pn_Alloc_max);
	set_irn_n(node, pn_Alloc_M, cur_mem);
	set_irn_n(node, pn_Alloc_res, res);
}
void ia32_swap_left_right(ir_node *node)
{
	ia32_attr_t *attr  = get_ia32_attr(node);
	ir_node     *left  = get_irn_n(node, n_ia32_binary_left);
	ir_node     *right = get_irn_n(node, n_ia32_binary_right);

	assert(is_ia32_commutative(node));
	attr->ins_permuted = !attr->ins_permuted;
	set_irn_n(node, n_ia32_binary_left,  right);
	set_irn_n(node, n_ia32_binary_right, left);
}
void dmemory_lower_Arraylength(ir_node *arraylength)
{
	ir_node  *array_ref = get_Arraylength_arrayref(arraylength);
	ir_node  *block     = get_nodes_block(arraylength);
	ir_graph *irg       = get_irn_irg(block);
	ir_node  *cur_mem   = get_Arraylength_mem(arraylength);
	ir_node  *len       = (*dmemory_model.get_arraylength)(array_ref, irg, block, &cur_mem);

	turn_into_tuple(arraylength, pn_Arraylength_max);
	set_irn_n(arraylength, pn_Arraylength_M, cur_mem);
	set_irn_n(arraylength, pn_Arraylength_res, len);
}
static bool try_swap_inputs(ir_node *node)
{
	/* commutative operation, just switch the inputs */
	if (is_commutative(node)) {
		assert(get_amd64_attr_const(node)->op_mode == AMD64_OP_REG_REG);
		/* TODO: support Cmp input swapping */
		ir_node *in0 = get_irn_n(node, 0);
		ir_node *in1 = get_irn_n(node, 1);
		set_irn_n(node, 0, in1);
		set_irn_n(node, 1, in0);
		return true;
	}
	return false;
}
/*
 * The 64-bit version of libgcc no longer contains some builtin
 * functions for 32-bit values (__<builtin>si2).
 */
static void widen_builtin(ir_node *node)
{
	ir_type *mtp  = get_Builtin_type(node);
	ir_type *arg1 = get_method_param_type(mtp, 0);

	// Nothing to do, if argument size is at least machine size.
	if (get_type_size(arg1) >= ir_target_pointer_size())
		return;

	// Only touch builtins with no 32-bit version.
	ir_builtin_kind kind = get_Builtin_kind(node);
	if (kind != ir_bk_clz    && kind != ir_bk_ctz     &&
	    kind != ir_bk_ffs    && kind != ir_bk_parity  &&
	    kind != ir_bk_popcount) {
		return;
	}

	ir_mode  *target_mode = get_reference_offset_mode(mode_P);
	dbg_info *dbgi        = get_irn_dbg_info(node);
	ir_node  *block       = get_nodes_block(node);
	ir_node  *op          = get_irn_n(node, n_Builtin_max + 1);

	ir_node *conv = new_rd_Conv(dbgi, block, op, target_mode);
	set_irn_n(node, n_Builtin_max + 1, conv);

	ir_type *new_arg1   = get_type_for_mode(target_mode);
	ir_type *new_result = get_method_res_type(mtp, 0);
	ir_type *new_type   = new_type_method(1, 1, false, cc_cdecl_set, mtp_no_property);
	set_method_param_type(new_type, 0, new_arg1);
	set_method_res_type(new_type, 0, new_result);
	set_Builtin_type(node, new_type);
}
static void fix_address_pic_elf(ir_node *const node, void *const data)
{
	(void)data;
	foreach_irn_in(node, i, pred) {
		if (!is_Address(pred))
			continue;

		ir_entity *const entity = get_Address_entity(pred);
		if (is_tls_entity(entity))
			continue;

		ir_graph *const irg         = get_irn_irg(node);
		bool      const ext_visible = is_externally_visible(entity);
		ir_node        *res;
		if (i == n_Call_ptr && is_Call(node)) {
			/* We can call compilation-unit local functions directly; everything
			 * else goes through the PLT. */
			x86_immediate_kind_t const reloc
				= ext_visible ? X86_IMM_PLT : X86_IMM_PCREL;
			res = be_new_Relocation(irg, reloc, entity, mode_P);
		} else if (!ext_visible) {
			res = be_new_Relocation(irg, X86_IMM_PCREL, entity, mode_P);
		} else {
			res = create_gotpcrel_load(irg, entity);
		}
		set_irn_n(node, i, res);
	}
}
static void fix_address_pic_mach_o(ir_node *const node, void *const data)
{
	(void)data;
	foreach_irn_in(node, i, pred) {
		if (!is_Address(pred))
			continue;

		ir_entity *const entity = get_Address_entity(pred);
		if (is_tls_entity(entity))
			continue;

		ir_graph *const irg = get_irn_irg(node);
		ir_node        *res;
		if (i == n_Call_ptr && is_Call(node)) {
			// Somehow we can always call PC relative. Are there trampolines
			// involved?
			res = be_new_Relocation(irg, X86_IMM_PCREL, entity, mode_P);
		} else if (entity_has_definition(entity)
		        && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE)) {
			res = be_new_Relocation(irg, X86_IMM_PCREL, entity, mode_P);
		} else {
			res = create_gotpcrel_load(irg, entity);
		}
		set_irn_n(node, i, res);
	}
}
/** patches Addresses to work in position independent code */
static void fix_pic_addresses(ir_node *const node, void *const data)
{
	(void)data;

	ir_graph      *const irg = get_irn_irg(node);
	be_main_env_t *const be  = be_get_irg_main_env(irg);
	foreach_irn_in(node, i, pred) {
		if (!is_Address(pred))
			continue;

		ir_node          *res;
		ir_entity *const  entity = get_Address_entity(pred);
		dbg_info  *const  dbgi   = get_irn_dbg_info(pred);
		if (i == n_Call_ptr && is_Call(node)) {
			/* Calls can jump to relative addresses, so we can directly jump to
			 * the (relatively) known call address or the trampoline */
			if (can_address_relative(entity))
				continue;

			ir_entity *const trampoline = get_trampoline(be, entity);
			res = new_rd_Address(dbgi, irg, trampoline);
		} else if (get_entity_type(entity) == get_code_type()) {
			/* Block labels can always be addressed directly. */
			continue;
		} else {
			/* Everything else is accessed relative to EIP. */
			ir_node *const block    = get_nodes_block(pred);
			ir_mode *const mode     = get_irn_mode(pred);
			ir_node *const pic_base = ia32_get_pic_base(irg);

			if (can_address_relative(entity)) {
				/* All ok now for locally constructed stuff. */
				res = new_rd_Add(dbgi, block, pic_base, pred, mode);
				/* Make sure the walker doesn't visit this add again. */
				mark_irn_visited(res);
			} else {
				/* Get entry from pic symbol segment. */
				ir_entity *const pic_symbol  = get_pic_symbol(be, entity);
				ir_node   *const pic_address = new_rd_Address(dbgi, irg, pic_symbol);
				ir_node   *const add         = new_rd_Add(dbgi, block, pic_base, pic_address, mode);
				mark_irn_visited(add);

				/* We need an extra indirection for global data outside our current
				 * module. The loads are always safe and can therefore float and
				 * need no memory input. */
				ir_type *const type  = get_entity_type(entity);
				ir_node *const nomem = get_irg_no_mem(irg);
				ir_node *const load  = new_rd_Load(dbgi, block, nomem, add, mode, type, cons_floats);
				res = new_r_Proj(load, mode, pn_Load_res);
			}
		}
		set_irn_n(node, i, res);
	}
}
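A per-node fixup like this is normally applied to the whole graph through libFirm's node walker. The following is a minimal sketch, assuming the usual irg_walk_graph(irg, pre, post, env) interface; the wrapper name fix_pic_addresses_in_graph and the choice of the pre-callback slot are illustrative assumptions, not taken from the surrounding code.

/* Hypothetical driver: visit every node of the graph once and let
 * fix_pic_addresses() rewire any Address inputs it finds. */
static void fix_pic_addresses_in_graph(ir_graph *irg)
{
	irg_walk_graph(irg, fix_pic_addresses, NULL, NULL);
}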
void ConvHandler::cleanUp(Node node)
{
	if (is_Conv(node)) {
		Node child = node.getChild(0);
		if (/*is_Conv(child) && */ node.getMode() == child.getMode())
			replaceNode(node, child);
		else if (is_Const(child))
			replaceNode(node, new_r_Const_long(irg, node.getMode(),
			                                   child.getTarval().getLong()));
		else if (is_Conv(child))
			set_irn_n(node, 0, child.getChild(0));
	}
}
static void introduce_epilog(ir_node *ret)
{
	arch_register_t const *const sp_reg = &arm_registers[REG_SP];
	assert(arch_get_irn_register_req_in(ret, n_arm_Return_sp) == sp_reg->single_req);

	ir_node  *const sp         = get_irn_n(ret, n_arm_Return_sp);
	ir_node  *const block      = get_nodes_block(ret);
	ir_graph *const irg        = get_irn_irg(ret);
	ir_type  *const frame_type = get_irg_frame_type(irg);
	unsigned  const frame_size = get_type_size_bytes(frame_type);
	ir_node  *const incsp      = be_new_IncSP(sp_reg, block, sp, -frame_size, 0);
	set_irn_n(ret, n_arm_Return_sp, incsp);
	sched_add_before(ret, incsp);
}
void be_set_IncSP_pred(ir_node *incsp, ir_node *pred)
{
	assert(be_is_IncSP(incsp));
	set_irn_n(incsp, n_be_IncSP_pred, pred);
}