/**
 * Copies a node to a new irg. The Ins of the new node point to
 * the predecessors on the old irg. n->link points to the new node.
 *
 * @param n    The node to be copied
 * @param irg  the new irg
 *
 * Does NOT copy standard nodes like Start, End etc that are fixed
 * in an irg. Instead, the corresponding nodes of the new irg are returned.
 * Note further, that the new nodes have no block.
 */
static void copy_irn_to_irg(ir_node *n, ir_graph *irg)
{
	/* Standard nodes are unique per graph: instead of duplicating them,
	 * map them onto the fixed counterparts of the target graph. */
	ir_node *copy = NULL;
	switch (get_irn_opcode(n)) {
	case iro_NoMem:
		copy = get_irg_no_mem(irg);
		break;

	case iro_Block: {
		ir_graph *const src_irg = get_irn_irg(n);
		if (n == get_irg_start_block(src_irg)) {
			copy = get_irg_start_block(irg);
		} else if (n == get_irg_end_block(src_irg)) {
			copy = get_irg_end_block(irg);
		}
		break;
	}

	case iro_Start:
		copy = get_irg_start(irg);
		break;

	case iro_End:
		copy = get_irg_end(irg);
		break;

	case iro_Proj: {
		ir_graph *const src_irg = get_irn_irg(n);
		if (n == get_irg_frame(src_irg)) {
			copy = get_irg_frame(irg);
		} else if (n == get_irg_initial_mem(src_irg)) {
			copy = get_irg_initial_mem(irg);
		} else if (n == get_irg_args(src_irg)) {
			copy = get_irg_args(irg);
		}
		break;
	}
	}

	if (copy == NULL) {
		/* An ordinary node: create a fresh one in the new graph.
		 * The block is intentionally NULL and will be set later;
		 * the ins still point into the old graph. */
		copy = new_ir_node(get_irn_dbg_info(n), irg, NULL, get_irn_op(n),
		                   get_irn_mode(n), get_irn_arity(n), get_irn_in(n));
		/* Copy the attributes.  These might point to additional data.  If
		 * this was allocated on the old obstack the pointers now are
		 * dangling.  This frees e.g. the memory of the graph_arr allocated
		 * in new_immBlock. */
		copy_node_attr(irg, n, copy);
	}
	set_irn_link(n, copy);
}
/**
 * Create an FldCW instruction that reloads the x87 FPU control word before
 * node @p before and schedules it there.
 *
 * @param env         unused callback environment
 * @param state       unused state node (callback signature requirement)
 * @param spill       memory value holding a previously spilled control word,
 *                    or NULL if none exists yet
 * @param before      node before which the reload must take effect
 * @param last_state  the current control-word state; must be non-NULL when
 *                    @p spill is NULL (asserted below)
 * @return the new FldCW node, pinned to REG_FPCW
 */
static ir_node *create_fpu_mode_reload(void *const env, ir_node *const state,
                                       ir_node *const spill,
                                       ir_node *const before,
                                       ir_node *const last_state)
{
	(void)env;
	(void)state;
	ir_node *reload;
	ir_node  *const block = get_nodes_block(before);
	ir_graph *const irg   = get_irn_irg(block);
	ir_node  *const noreg = ia32_new_NoReg_gp(irg);
	ir_node  *const nomem = get_irg_no_mem(irg);
	if (ia32_cg_config.use_unsafe_floatconv) {
		/* Unsafe mode: load a fixed control word from a global constant
		 * entity instead of modifying the spilled value. */
		reload = new_bd_ia32_FldCW(NULL, block, noreg, noreg, nomem);
		ir_entity *const rounding_mode = spill
			? create_ent(&fpcw_round,    0xC7F, "_fpcw_round")
			: create_ent(&fpcw_truncate, 0x37F, "_fpcw_truncate");
		set_ia32_am_ent(reload, rounding_mode);
	} else {
		ir_node *mem;
		ir_node *const frame = get_irg_frame(irg);
		if (spill) {
			/* Reuse the already spilled control word. */
			mem = spill;
		} else {
			/* No spill slot yet: store the current control word to the
			 * frame, reload it, set the rounding-control bits and store it
			 * back, so the subsequent FldCW picks up the modified word. */
			assert(last_state);
			ir_node *const cwstore = create_fnstcw(block, frame, noreg, nomem,
			                                       last_state);
			sched_add_before(before, cwstore);
			ir_node *const load = new_bd_ia32_Load(NULL, block, frame, noreg,
			                                       cwstore);
			set_ia32_op_type(load, ia32_AddrModeS);
			set_ia32_ls_mode(load, mode_Hu);
			set_ia32_frame_use(load, IA32_FRAME_USE_32BIT);
			sched_add_before(before, load);
			ir_node *const load_res = new_r_Proj(load, ia32_mode_gp,
			                                     pn_ia32_Load_res);
			/* TODO: Make the actual mode configurable in ChangeCW. */
			ir_node *const or_const = ia32_create_Immediate(irg, 0xC00);
			ir_node *const orn = new_bd_ia32_Or(NULL, block, noreg, noreg,
			                                    nomem, load_res, or_const);
			sched_add_before(before, orn);
			ir_node *const store = new_bd_ia32_Store(NULL, block, frame, noreg,
			                                         nomem, orn);
			set_ia32_op_type(store, ia32_AddrModeD);
			/* Use ia32_mode_gp, as movl has a shorter opcode than movw. */
			set_ia32_ls_mode(store, ia32_mode_gp);
			set_ia32_frame_use(store, IA32_FRAME_USE_32BIT);
			sched_add_before(before, store);
			mem = new_r_Proj(store, mode_M, pn_ia32_Store_M);
		}
		reload = new_bd_ia32_FldCW(NULL, block, frame, noreg, mem);
	}
	set_ia32_op_type(reload, ia32_AddrModeS);
	set_ia32_ls_mode(reload, ia32_mode_fpcw);
	set_ia32_frame_use(reload, IA32_FRAME_USE_32BIT);
	arch_set_irn_register(reload, &ia32_registers[REG_FPCW]);
	sched_add_before(before, reload);
	return reload;
}
/**
 * Create an arm Str node that spills @p value to the frame and schedule it
 * directly after @p after.
 *
 * @param value  the value to spill
 * @param after  node after which the store is scheduled
 * @return the new Str node (flagged as a spill)
 */
static ir_node *arm_new_spill(ir_node *value, ir_node *after)
{
	ir_graph *const irg   = get_irn_irg(after);
	ir_node  *const block = get_block(after);
	ir_node  *const frame = get_irg_frame(irg);
	ir_node  *const nomem = get_irg_no_mem(irg);
	ir_mode  *const mode  = get_irn_mode(value);

	ir_node *const store = new_bd_arm_Str(NULL, block, frame, value, nomem,
	                                      mode, NULL, false, 0, true);
	arch_add_irn_flags(store, arch_irn_flag_spill);
	sched_add_after(after, store);
	return store;
}
/**
 * Create an arm Ldr node that reloads a spilled value from the frame and
 * schedule it directly before @p before.
 *
 * @param value   the original value (provides the mode to reload with)
 * @param spill   the spill memory value to load from
 * @param before  node before which the load is scheduled
 * @return the result Proj of the new Ldr node
 */
static ir_node *arm_new_reload(ir_node *value, ir_node *spill, ir_node *before)
{
	ir_graph *const irg   = get_irn_irg(before);
	ir_node  *const block = get_block(before);
	ir_node  *const frame = get_irg_frame(irg);
	ir_mode  *const mode  = get_irn_mode(value);

	ir_node *const load = new_bd_arm_Ldr(NULL, block, frame, spill, mode,
	                                     NULL, false, 0, true);
	arch_add_irn_flags(load, arch_irn_flag_reload);
	sched_add_before(before, load);
	return new_r_Proj(load, mode, pn_arm_Ldr_res);
}
/* * Check, if the value of a node cannot represent a NULL pointer. * * - Sels are skipped * - A SymConst(entity) is NEVER a NULL pointer * - Confirms are evaluated */ int value_not_null(const ir_node *n, const ir_node **confirm) { ir_tarval *tv; *confirm = NULL; tv = value_of(n); if (tarval_is_constant(tv) && ! tarval_is_null(tv)) return 1; assert(mode_is_reference(get_irn_mode(n))); /* skip all Sel nodes */ while (is_Sel(n)) { n = get_Sel_ptr(n); } while (1) { if (is_Proj(n)) { n = get_Proj_pred(n); continue; } break; } if (is_SymConst_addr_ent(n)) { /* global references are never NULL */ return 1; } else if (n == get_irg_frame(get_irn_irg(n))) { /* local references are never NULL */ return 1; } else if (is_Alloc(n)) { /* alloc never returns NULL (it throws an exception instead) */ return 1; } else { /* check for more Confirms */ for (; is_Confirm(n); n = get_Confirm_value(n)) { if (get_Confirm_relation(n) == ir_relation_less_greater) { ir_node *bound = get_Confirm_bound(n); ir_tarval *tv = value_of(bound); if (tarval_is_null(tv)) { *confirm = n; return 1; } } } } return 0; }
/**
 * Create a spill of the x87 FPU control word @p state, scheduled after
 * @p after.
 *
 * @param env    unused callback environment
 * @param state  the control-word state to spill
 * @param force  create the spill even for a ChangeCW state
 * @param after  node after which the spill is scheduled
 * @return the spill node, or NULL when no spill is needed
 */
static ir_node *create_fpu_mode_spill(void *const env, ir_node *const state,
                                      bool const force, ir_node *const after)
{
	(void)env;

	/* A ChangeCW does not need to be spilled unless explicitly forced. */
	if (!force && is_ia32_ChangeCW(state))
		return NULL;

	ir_node *const block = get_nodes_block(state);
	ir_node *spill;
	if (ia32_cg_config.use_unsafe_floatconv) {
		/* Don't spill the fpcw in unsafe mode. */
		spill = new_bd_ia32_FnstCWNOP(NULL, block, state);
	} else {
		ir_graph *const irg   = get_irn_irg(state);
		ir_node  *const noreg = ia32_new_NoReg_gp(irg);
		ir_node  *const nomem = get_irg_no_mem(irg);
		ir_node  *const frame = get_irg_frame(irg);
		spill = create_fnstcw(block, frame, noreg, nomem, state);
	}
	sched_add_after(skip_Proj(after), spill);
	return spill;
}
/*
 * Optimize the frame type of an irg by removing
 * never touched entities.
 *
 * Uses the entity-link resource to mark used entities, then frees every
 * frame entity that is neither referenced by a Sel on the frame pointer
 * nor a method entity (inner functions are never unused).
 */
void opt_frame_irg(ir_graph *irg)
{
	ir_type *const frame_tp = get_irg_frame_type(irg);
	size_t   const n        = get_class_n_members(frame_tp);

	/* n is unsigned (size_t): compare against 0 explicitly instead of the
	 * misleading `n <= 0`. */
	if (n == 0)
		return;

	assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
	irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);

	/* clear all entity links */
	for (size_t i = n; i > 0;) {
		ir_entity *const ent = get_class_member(frame_tp, --i);
		set_entity_link(ent, NULL);
	}

	/* mark all entities used through a Sel on the frame pointer */
	ir_node *const frame = get_irn_irg(irg) == NULL ? NULL : get_irg_frame(irg);
	for (unsigned o = get_irn_n_outs(frame); o-- > 0;) {
		/* count down without the unsigned-wraparound of `count - 1` in an
		 * int when the out count is 0 */
		ir_node *const sel = get_irn_out(frame, o);
		if (is_Sel(sel)) {
			ir_entity *const ent = get_Sel_entity(sel);
			/* only entities on the frame */
			if (get_entity_owner(ent) == frame_tp)
				set_entity_link(ent, ent);
		}
	}

	/* collect the unused entities in a list chained through their links */
	ir_entity *list = NULL;
	for (size_t i = n; i > 0;) {
		ir_entity *const ent = get_class_member(frame_tp, --i);
		/* beware of inner functions: those are NOT unused */
		if (get_entity_link(ent) == NULL && !is_method_entity(ent)) {
			set_entity_link(ent, list);
			list = ent;
		}
	}

	if (list != NULL) {
		/* delete list members */
		for (ir_entity *ent = list; ent != NULL; ent = list) {
			list = (ir_entity*)get_entity_link(ent);
			free_entity(ent);
		}
		/* we changed the frame type, its layout should be redefined */
		set_type_state(frame_tp, layout_undefined);
	}
	irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);

	/* we changed the type, this affects none of the currently known graph
	 * properties, but I don't use ALL because I don't know if someone adds
	 * type-based properties at some point */
	confirm_irg_properties(irg,
		IR_GRAPH_PROPERTIES_CONTROL_FLOW
		| IR_GRAPH_PROPERTY_NO_BADS
		| IR_GRAPH_PROPERTY_NO_TUPLES
		| IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
		| IR_GRAPH_PROPERTY_CONSISTENT_OUTS
		| IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE
		| IR_GRAPH_PROPERTY_MANY_RETURNS);
}