/**
 * Checks whether a node is a function argument, i.e. a Proj(Proj(Start)).
 */
static int is_arg(ir_node *node)
{
	if (!is_Proj(node))
		return 0;
	node = get_Proj_pred(node);
	if (!is_Proj(node))
		return 0;
	node = get_Proj_pred(node);
	return is_Start(node);
}
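/* Illustration (a hypothetical construction sketch using standard libFirm
 * constructors): a function argument is the n-th Proj of the Start node's
 * argument tuple, which is itself a Proj of Start. 'irg' and the chosen
 * modes are assumptions for the example only. */
static void build_arg_example(ir_graph *irg)
{
	ir_node *start = get_irg_start(irg);
	ir_node *args  = new_r_Proj(start, mode_T, pn_Start_T_args);
	ir_node *arg0  = new_r_Proj(args, mode_Is, 0);
	assert(is_arg(arg0)); /* matches the Proj(Proj(Start)) pattern */
	(void)arg0;
}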
/**
 * Lower Alloca nodes to allocate "bytes" instead of a certain type.
 */
static void lower_alloca_free(ir_node *node, void *data)
{
	(void)data;
	if (is_Alloc(node)) {
		/* handled below */
	} else if (is_Proj(node)) {
		ir_node *proj_pred = get_Proj_pred(node);
		if (is_Alloc(proj_pred))
			transform_Proj_Alloc(node);
		return;
	} else {
		return;
	}
	if (!ir_nodeset_insert(&transformed, node))
		return;
	if (stack_alignment <= 1)
		return;

	ir_node  *const size     = get_Alloc_size(node);
	ir_node  *const mem      = get_Alloc_mem(node);
	ir_node  *const block    = get_nodes_block(node);
	dbg_info *const dbgi     = get_irn_dbg_info(node);
	ir_node  *const new_size = adjust_alloc_size(dbgi, size, block);
	ir_node  *const new_node = new_rd_Alloc(dbgi, block, mem, new_size, 1);
	ir_nodeset_insert(&transformed, new_node);
	if (new_node != node)
		exchange(node, new_node);
}
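/* adjust_alloc_size() is not shown above; the arithmetic it has to perform
 * is rounding the byte size up to the stack alignment. A minimal sketch on
 * plain integers (round_up_to_alignment is a hypothetical name;
 * stack_alignment is assumed to be a power of two): */
static unsigned long round_up_to_alignment(unsigned long size,
                                           unsigned long alignment)
{
	/* (size + alignment-1) & ~(alignment-1) rounds up to the next
	 * multiple of 'alignment' for power-of-two alignments */
	return (size + alignment - 1) & ~(alignment - 1);
}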
void eh_lower_Raise(ir_node *raise, ir_node *proj)
{
	assert(is_Raise(raise) && is_Proj(proj));
	ir_node  *ex_obj  = get_Raise_exo_ptr(raise);
	ir_node  *block   = get_nodes_block(raise);
	ir_graph *irg     = get_irn_irg(raise);
	ir_node  *cur_mem = get_Raise_mem(raise);
	ir_node  *c_symc  = new_r_SymConst(irg, throw_entity);
	ir_node  *in[1]   = { ex_obj };
	ir_node  *throw   = new_r_Call(block, cur_mem, c_symc, 1, in,
	                               get_entity_type(throw_entity));
	ir_set_throws_exception(throw, 1);
	exchange(raise, throw);
	set_Proj_num(proj, pn_Call_X_except);
}
/*
 * Check whether the value of a node cannot represent a NULL pointer.
 *
 * - Sels are skipped
 * - a SymConst(entity) is NEVER a NULL pointer
 * - Confirms are evaluated
 */
int value_not_null(const ir_node *n, const ir_node **confirm)
{
	*confirm = NULL;
	ir_tarval *tv = value_of(n);
	if (tarval_is_constant(tv) && !tarval_is_null(tv))
		return 1;

	assert(mode_is_reference(get_irn_mode(n)));
	/* skip all Sel nodes */
	while (is_Sel(n)) {
		n = get_Sel_ptr(n);
	}
	/* skip all Proj nodes */
	while (is_Proj(n)) {
		n = get_Proj_pred(n);
	}
	if (is_SymConst_addr_ent(n)) {
		/* global references are never NULL */
		return 1;
	} else if (n == get_irg_frame(get_irn_irg(n))) {
		/* local references are never NULL */
		return 1;
	} else if (is_Alloc(n)) {
		/* an Alloc never returns NULL (it throws an exception instead) */
		return 1;
	} else {
		/* check for more Confirms */
		for (; is_Confirm(n); n = get_Confirm_value(n)) {
			if (get_Confirm_relation(n) == ir_relation_less_greater) {
				ir_node   *bound    = get_Confirm_bound(n);
				ir_tarval *bound_tv = value_of(bound);
				if (tarval_is_null(bound_tv)) {
					*confirm = n;
					return 1;
				}
			}
		}
	}
	return 0;
}
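/* Usage sketch (a hypothetical caller, minimal on purpose): a local
 * optimization can fold a comparison against NULL once the analysis proves
 * the pointer non-null. 'ptr' stands for a Cmp operand; everything around
 * the call is illustrative. */
static int null_cmp_is_always_false(const ir_node *ptr)
{
	const ir_node *confirm;
	if (value_not_null(ptr, &confirm)) {
		/* ptr == NULL is known to be false; if the proof came from a
		 * Confirm node, 'confirm' now points to it */
		return 1;
	}
	return 0;
}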
/** Transforms:
 *       a
 *       |
 *     Tuple
 *       |        =>    a
 *    Proj x
 */
static void exchange_tuple_projs(ir_node *node, void *env)
{
	bool *changed = (bool*)env;
	if (!is_Proj(node))
		return;

	/* Handle Tuple(Tuple,...) case. */
	exchange_tuple_projs(get_Proj_pred(node), env);

	ir_node *pred = get_Proj_pred(node);
	if (!is_Tuple(pred))
		return;

	unsigned pn         = get_Proj_num(node);
	ir_node *tuple_pred = get_Tuple_pred(pred, pn);
	exchange(node, tuple_pred);
	*changed = true;
}
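/* A minimal driver sketch (assumed context): apply the walker until a
 * fixpoint is reached, since resolving one Proj/Tuple pair can expose
 * another. irg_walk_graph() is the standard libFirm graph walker; the
 * wrapper name is illustrative. */
static void remove_all_tuples(ir_graph *irg)
{
	bool changed;
	do {
		changed = false;
		irg_walk_graph(irg, exchange_tuple_projs, NULL, &changed);
	} while (changed);
}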
/**
 * Edge hook to dump the schedule edges.
 */
static void sched_edge_hook(FILE *F, const ir_node *irn)
{
	ir_graph *irg = get_irn_irg(irn);
	if (!irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_BACKEND))
		return;
	if (is_Proj(irn) || is_Block(irn) || !sched_is_scheduled(irn))
		return;

	ir_node *const prev = sched_prev(irn);
	if (!sched_is_begin(prev)) {
		fprintf(F, "edge:{sourcename: ");
		print_nodeid(F, irn);
		fprintf(F, " targetname: ");
		print_nodeid(F, prev);
		fprintf(F, " color:magenta}\n");
	}
}
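/* Registration sketch (hedged): sched_edge_hook matches libFirm's
 * dump_node_edge_func signature, so it can be installed before dumping a
 * scheduled graph. The wrapper and the dump suffix are illustrative. */
static void dump_scheduled_graph(ir_graph *irg)
{
	set_dump_node_edge_hook(sched_edge_hook);
	dump_ir_graph(irg, "sched");
}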
void be_info_new_node(ir_graph *irg, ir_node *node)
{
	/* Projs need no be info, all info is fetched from their predecessor */
	if (is_Proj(node))
		return;

	struct obstack *obst = be_get_be_obst(irg);
	backend_info_t *info = OALLOCZ(obst, backend_info_t);
	assert(node->backend_info == NULL);
	node->backend_info = info;

	/*
	 * Set backend info for some middleend nodes which still appear in
	 * backend graphs
	 */
	arch_irn_flags_t           flags = arch_irn_flag_not_scheduled;
	arch_register_req_t const *req   = arch_no_register_req;
	switch (get_irn_opcode(node)) {
	case iro_Block:
	case iro_Dummy:
	case iro_Anchor:
	case iro_Bad:
	case iro_End:
	case iro_Unknown:
		break;

	case iro_NoMem:
	case iro_Pin:
	case iro_Sync:
		req = arch_memory_req;
		break;

	case iro_Phi:
		flags = arch_irn_flag_schedule_first;
		break;

	default:
		return;
	}
	info->flags            = flags;
	info->out_infos        = NEW_ARR_DZ(reg_out_info_t, obst, 1);
	info->out_infos[0].req = req;
}
static ir_node *transform_proj(ir_node *node)
{
	ir_node *pred    = get_Proj_pred(node);
	ir_op   *pred_op = get_irn_op(pred);
	be_transform_func *proj_transform
		= (be_transform_func*)pred_op->ops.generic1;
	/* we should have a Proj transformer registered */
#ifdef DEBUG_libfirm
	if (!proj_transform) {
		unsigned const node_pn = get_Proj_num(node);
		if (is_Proj(pred)) {
			unsigned const pred_pn   = get_Proj_num(pred);
			ir_node *const pred_pred = get_Proj_pred(pred);
			panic("no transformer for %+F (%u) -> %+F (%u) -> %+F",
			      node, node_pn, pred, pred_pn, pred_pred);
		} else {
			panic("no transformer for %+F (%u) -> %+F",
			      node, node_pn, pred);
		}
	}
#endif
	return proj_transform(node);
}
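/* Registration sketch (an assumption about the surrounding transform
 * driver): the Proj transformer fetched from ops.generic1 above is
 * installed on the *predecessor's* opcode, in libFirm via
 * be_set_transform_proj_function(). gen_Proj_Div is a hypothetical
 * backend transformer used only for illustration. */
static void register_proj_transformers(void)
{
	be_set_transform_proj_function(op_Div, gen_Proj_Div);
}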
static void dump_backend_info_hook(void *context, FILE *F, const ir_node *node)
{
	(void)context;
	ir_graph *const irg = get_irn_irg(node);
	if (!irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_BACKEND))
		return;

	be_dump_reqs_and_registers(F, node);

	if (is_Block(node)) {
		be_lv_t *const lv = be_get_irg_liveness(irg);
		if (lv->sets_valid)
			be_dump_liveness_block(lv, F, node);
	}

#ifndef NDEBUG
	if (!is_Proj(node)) {
		char const *const orig = be_get_info(node)->orig_node;
		fprintf(F, "orig node = %s\n", orig ? orig : "n/a");
	}
#endif
}
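/* Registration sketch (hedged): the hook matches libFirm's
 * dump_node_info_cb_t, so it is installed via dump_add_node_info_callback();
 * the returned handle can later be passed to
 * dump_remove_node_info_callback(). The wrapper name is illustrative. */
static void *install_backend_dump_hook(void)
{
	return dump_add_node_info_callback(dump_backend_info_hook, NULL);
}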
/**
 * Transforms a Div into the appropriate soft float function.
 */
static bool lower_Div(ir_node *const n)
{
	ir_mode *const mode = get_Div_resmode(n);
	if (!mode_is_float(mode))
		return false;

	ir_node *const left   = get_Div_left(n);
	ir_node *const right  = get_Div_right(n);
	ir_node *const in[]   = { left, right };
	ir_node *const result = make_softfloat_call(n, "div", ARRAY_SIZE(in), in);
	ir_node *const call   = skip_Proj(skip_Proj(result));
	set_irn_pinned(call, get_irn_pinned(n));

	foreach_out_edge_safe(n, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

		switch ((pn_Div)get_Proj_num(proj)) {
		case pn_Div_M:
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_M);
			continue;
		case pn_Div_X_regular:
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_X_regular);
			continue;
		case pn_Div_X_except:
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_X_except);
			continue;
		case pn_Div_res:
			exchange(proj, result);
			continue;
		}
		panic("unexpected Proj number");
	}
	/* all Projs have been rerouted to the call */
	return true;
}
static void infer_typeinfo_walker(ir_node *irn, void *env)
{
	bool *changed = (bool*)env;

	// A node's type needs to be calculated only once.
	if (get_irn_typeinfo_type(irn) != initial_type)
		return;

	if (is_Alloc(irn)) {
		// This one is easy: we know the exact dynamic type.
		ir_type *type = get_Alloc_type(irn);
		if (!is_Class_type(type))
			return;
		set_irn_typeinfo_type(irn, type);
		*changed = true;
	} else if (is_Sel(irn) || is_SymConst_addr_ent(irn)) {
		// The type we determine here is the one of the entity we select or
		// reference. The transform_Sel method below will use the type
		// incoming on the Sel_ptr input.
		ir_type *type = get_Sel_or_SymConst_type(irn);
		if (!type)
			return;
		ir_type *one_alive = get_alive_subclass(type);
		if (!one_alive)
			return;
		set_irn_typeinfo_type(irn, one_alive);
		*changed = true;
	} else if (is_Call(irn)) {
		// The dynamic type of the call result is the return type of the
		// called entity.
		ir_node *call_pred = get_Call_ptr(irn);
		ir_type *pred_type = get_irn_typeinfo_type(call_pred);
		if (pred_type == initial_type)
			return;
		set_irn_typeinfo_type(irn, pred_type);
		*changed = true;
	} else if (is_Load(irn)) {
		// The dynamic type of the Load result is the type of the loaded
		// entity.
		ir_node *load_pred = get_Load_ptr(irn);
		if (!is_Sel(load_pred) && !is_SymConst_addr_ent(load_pred))
			return;
		ir_type *pred_type = get_irn_typeinfo_type(load_pred);
		if (pred_type == initial_type)
			return;
		set_irn_typeinfo_type(irn, pred_type);
		*changed = true;
	} else if (is_Proj(irn)) {
		// Types have to be propagated through Proj nodes (XXX: and also
		// through Cast and Confirm).
		ir_mode *pmode = get_irn_mode(irn);
		if (pmode != mode_P)
			return;
		ir_node *proj_pred = get_Proj_pred(irn);
		if (is_Proj(proj_pred) && get_irn_mode(proj_pred) == mode_T
		 && get_Proj_proj(proj_pred) == pn_Call_T_result
		 && is_Call(get_Proj_pred(proj_pred)))
			proj_pred = get_Proj_pred(proj_pred); // skip the result tuple
		ir_type *pred_type = get_irn_typeinfo_type(proj_pred);
		if (pred_type == initial_type)
			return;
		set_irn_typeinfo_type(irn, pred_type);
		*changed = true;
	} else if (is_Phi(irn)) {
		// Phi nodes are a special case because the incoming type
		// information must be merged. A Phi node's type is unknown until
		// all inputs are known to be the same dynamic type.
		ir_mode *pmode = get_irn_mode(irn);
		if (pmode != mode_P)
			return;
		int      phi_preds = get_Phi_n_preds(irn);
		ir_type *last      = NULL;
		for (int p = 0; p < phi_preds; p++) {
			ir_node *pred      = get_Phi_pred(irn, p);
			ir_type *pred_type = get_irn_typeinfo_type(pred);
			if (pred_type == initial_type)
				return;
			if (p && last != pred_type)
				return;
			last = pred_type;
		}
		set_irn_typeinfo_type(irn, last);
		*changed = true;
	}
}
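/* Fixpoint driver sketch (assumed context; the wrapper name is
 * hypothetical): run the walker until no node's type information changes
 * anymore. This presumes the typeinfo facility was initialized beforehand,
 * e.g. via init_irtypeinfo(). Using the post callback means predecessors
 * tend to be visited first, which speeds up convergence. */
static void infer_typeinfo(ir_graph *irg)
{
	bool changed;
	do {
		changed = false;
		irg_walk_graph(irg, NULL, infer_typeinfo_walker, &changed);
	} while (changed);
}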
static void lower_divmod(ir_node *node, ir_node *left, ir_node *right,
                         ir_node *mem, ir_mode *mode, int res_offset)
{
	dbg_info  *dbgi       = get_irn_dbg_info(node);
	ir_node   *block      = get_nodes_block(node);
	ir_node   *left_low   = get_lowered_low(left);
	ir_node   *left_high  = get_lowered_high(left);
	ir_node   *right_low  = get_lowered_low(right);
	ir_node   *right_high = get_lowered_high(right);
	ir_mode   *node_mode  = get_irn_mode(left);
	ir_entity *entity     = mode_is_signed(node_mode) ? ldivmod : uldivmod;
	ir_type   *mtp        = get_entity_type(entity);
	ir_graph  *irg        = get_irn_irg(node);
	ir_node   *addr       = new_r_Address(irg, entity);
	ir_node   *in[4];
	if (arm_cg_config.big_endian) {
		in[0] = left_high;
		in[1] = left_low;
		in[2] = right_high;
		in[3] = right_low;
	} else {
		in[0] = left_low;
		in[1] = left_high;
		in[2] = right_low;
		in[3] = right_high;
	}
	ir_node *call    = new_rd_Call(dbgi, block, mem, addr, ARRAY_SIZE(in),
	                               in, mtp);
	ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
	set_irn_pinned(call, get_irn_pinned(node));

	foreach_out_edge_safe(node, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

		switch ((pn_Div)get_Proj_num(proj)) {
		case pn_Div_M:
			/* reroute to the call */
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_M);
			break;
		case pn_Div_X_regular:
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_X_regular);
			break;
		case pn_Div_X_except:
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_X_except);
			break;
		case pn_Div_res: {
			ir_mode *low_mode = get_irn_mode(left_low);
			if (arm_cg_config.big_endian) {
				ir_node *res_low  = new_r_Proj(resproj, low_mode, res_offset+1);
				ir_node *res_high = new_r_Proj(resproj, mode, res_offset);
				ir_set_dw_lowered(proj, res_low, res_high);
			} else {
				ir_node *res_low  = new_r_Proj(resproj, low_mode, res_offset);
				ir_node *res_high = new_r_Proj(resproj, mode, res_offset+1);
				ir_set_dw_lowered(proj, res_low, res_high);
			}
			break;
		}
		}
		/* mark this proj: we have handled it already, otherwise we might
		 * fall into our new nodes. */
		mark_irn_visited(proj);
	}
}
void x86_create_address_mode(x86_address_t *addr, ir_node *node,
                             x86_create_am_flags_t flags)
{
	addr->imm.kind = X86_IMM_VALUE;
	if (eat_immediate(addr, node, true)) {
		addr->variant = addr->ip_base ? X86_ADDR_RIP : X86_ADDR_JUST_IMM;
		return;
	}

	assert(!addr->ip_base);
	if (!(flags & x86_create_am_force)
	 && x86_is_non_address_mode_node(node)
	 && (!(flags & x86_create_am_double_use) || get_irn_n_edges(node) > 2)) {
		addr->variant = X86_ADDR_BASE;
		addr->base    = node;
		return;
	}

	ir_node *eat_imms = eat_immediates(addr, node, flags, false);
	if (eat_imms != node) {
		if (flags & x86_create_am_force)
			eat_imms = be_skip_downconv(eat_imms, true);

		node = eat_imms;
		if (x86_is_non_address_mode_node(node)) {
			addr->variant = X86_ADDR_BASE;
			addr->base    = node;
			return;
		}
	}

	/* starting point Add, Sub or Shl, FrameAddr */
	if (is_Shl(node)) {
		/* We don't want to eat add x, x as shl here, so only test for real
		 * Shl instructions, because we want the former as Lea x, x, not
		 * Shl x, 1 */
		if (eat_shl(addr, node)) {
			addr->variant = X86_ADDR_INDEX;
			return;
		}
	} else if (eat_immediate(addr, node, true)) {
		/* we can hit this case in x86_create_am_force mode */
		addr->variant = addr->ip_base ? X86_ADDR_RIP : X86_ADDR_JUST_IMM;
		return;
	} else if (is_Add(node)) {
		ir_node *left  = get_Add_left(node);
		ir_node *right = get_Add_right(node);
		if (flags & x86_create_am_force) {
			left  = be_skip_downconv(left, true);
			right = be_skip_downconv(right, true);
		}
		left  = eat_immediates(addr, left, flags, false);
		right = eat_immediates(addr, right, flags, false);

		if (eat_shl(addr, left)) {
			left = NULL;
		} else if (eat_shl(addr, right)) {
			right = NULL;
		}

		/* (x & 0xFFFFFFFC) + (x >> 2) -> lea(x >> 2, x >> 2, 4) */
		if (left != NULL && right != NULL) {
			ir_node *and;
			ir_node *shr;
			if (is_And(left) && (is_Shr(right) || is_Shrs(right))) {
				and = left;
				shr = right;
				goto tryit;
			}
			if (is_And(right) && (is_Shr(left) || is_Shrs(left))) {
				and = right;
				shr = left;
tryit:
				if (get_And_left(and) == get_binop_left(shr)) {
					ir_node *and_right = get_And_right(and);
					ir_node *shr_right = get_binop_right(shr);
					if (is_Const(and_right) && is_Const(shr_right)) {
						ir_tarval *and_mask     = get_Const_tarval(and_right);
						ir_tarval *shift_amount = get_Const_tarval(shr_right);
						ir_mode   *mode         = get_irn_mode(and);
						ir_tarval *all_one      = get_mode_all_one(mode);
						ir_tarval *shift_mask
							= tarval_shl(tarval_shr(all_one, shift_amount),
							             shift_amount);
						long val = get_tarval_long(shift_amount);
						if (and_mask == shift_mask && val >= 0 && val <= 3) {
							addr->variant = X86_ADDR_BASE_INDEX;
							addr->base    = shr;
							addr->index   = shr;
							addr->scale   = val;
							return;
						}
					}
				}
			}
		}

		if (left != NULL) {
			ir_node *base = addr->base;
			if (base == NULL) {
				addr->variant = addr->index != NULL ? X86_ADDR_BASE_INDEX
				                                    : X86_ADDR_BASE;
				addr->base    = left;
			} else {
				addr->variant = X86_ADDR_BASE_INDEX;
				assert(addr->index == NULL && addr->scale == 0);
				assert(right == NULL);
				/* esp must be used as base */
				if (is_Proj(left) && is_Start(get_Proj_pred(left))) {
					addr->index = base;
					addr->base  = left;
				} else {
					addr->index = left;
				}
			}
		}
		if (right != NULL) {
			ir_node *base = addr->base;
			if (base == NULL) {
				addr->variant = addr->index != NULL ? X86_ADDR_BASE_INDEX
				                                    : X86_ADDR_BASE;
				addr->base    = right;
			} else {
				addr->variant = X86_ADDR_BASE_INDEX;
				assert(addr->index == NULL && addr->scale == 0);
				/* esp must be used as base */
				if (is_Proj(right) && is_Start(get_Proj_pred(right))) {
					addr->index = base;
					addr->base  = right;
				} else {
					addr->index = right;
				}
			}
		}
		return;
	}

	addr->variant = X86_ADDR_BASE;
	addr->base    = node;
}
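/* Self-check sketch (hypothetical helper, not part of libFirm): the
 * (x & mask) + (x >> s) pattern above relies on the identity
 *     (x & shift_mask) + (x >> s) == (x >> s) + ((x >> s) << s)
 * with shift_mask = (all_one >> s) << s. For s = 2 both sides equal
 * (x >> 2) * 5, which is exactly base + index<<scale with
 * base = index = x >> 2 and scale = 2. */
#include <assert.h>
#include <stdint.h>

static void check_lea_identity(uint32_t x)
{
	uint32_t shift_mask = (UINT32_MAX >> 2) << 2; /* 0xFFFFFFFC */
	uint32_t pattern    = (x & shift_mask) + (x >> 2);
	uint32_t lea        = (x >> 2) + ((x >> 2) << 2);
	assert(pattern == lea);
	(void)pattern;
	(void)lea;
}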
/**
 * Compute the weight of a method parameter.
 *
 * @param arg  The parameter whose weight must be computed.
 */
static unsigned calc_method_param_weight(ir_node *arg)
{
	/* We mark the nodes to avoid endless recursion */
	mark_irn_visited(arg);

	unsigned weight = null_weight;
	for (int i = get_irn_n_outs(arg); i-- > 0; ) {
		ir_node *succ = get_irn_out(arg, i);
		if (irn_visited(succ))
			continue;

		/* We should not walk over the memory edge. */
		if (get_irn_mode(succ) == mode_M)
			continue;

		switch (get_irn_opcode(succ)) {
		case iro_Call:
			if (get_Call_ptr(succ) == arg) {
				/* the argument is used as a pointer input for a call,
				   we can probably change an indirect Call into a
				   direct one. */
				weight += indirect_call_weight;
			}
			break;
		case iro_Cmp: {
			/* We have reached a Cmp and must increase the weight by
			   cmp_weight. */
			ir_node *op;
			if (get_Cmp_left(succ) == arg)
				op = get_Cmp_right(succ);
			else
				op = get_Cmp_left(succ);

			if (is_irn_constlike(op)) {
				weight += const_cmp_weight;
			} else
				weight += cmp_weight;
			break;
		}
		case iro_Cond:
			/* the argument is used for a SwitchCond, a big win */
			weight += const_cmp_weight * get_irn_n_outs(succ);
			break;
		case iro_Id:
			/* when looking backward we might find Id nodes */
			weight += calc_method_param_weight(succ);
			break;
		case iro_Tuple:
			/* unoptimized tuple */
			for (int j = get_Tuple_n_preds(succ); j-- > 0; ) {
				ir_node *pred = get_Tuple_pred(succ, j);
				if (pred == arg) {
					/* look for Proj(j) */
					for (int k = get_irn_n_outs(succ); k-- > 0; ) {
						ir_node *succ_succ = get_irn_out(succ, k);
						if (is_Proj(succ_succ)) {
							if (get_Proj_proj(succ_succ) == j) {
								/* found */
								weight += calc_method_param_weight(succ_succ);
							}
						} else {
							/* this should NOT happen */
						}
					}
				}
			}
			break;
		default:
			if (is_binop(succ)) {
				/* We have reached a BinOp and must increase the weight by
				   binop_weight. If the other operand of the BinOp is a
				   constant, we increase the weight by const_binop_weight
				   and call the function recursively. */
				ir_node *op;
				if (get_binop_left(succ) == arg)
					op = get_binop_right(succ);
				else
					op = get_binop_left(succ);

				if (is_irn_constlike(op)) {
					weight += const_binop_weight;
					weight += calc_method_param_weight(succ);
				} else
					weight += binop_weight;
			} else if (get_irn_arity(succ) == 1) {
				/* We have reached a unary op and must increase the weight
				   by const_binop_weight and call the function recursively. */
				weight += const_binop_weight;
				weight += calc_method_param_weight(succ);
			}
			break;
		}
	}
	set_irn_link(arg, NULL);
	return weight;
}
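/* Driver sketch (hedged; assumes out edges are available, e.g. via
 * assure_irg_outs(), and that get_irg_args() yields the Start node's
 * argument tuple as in this codebase's API vintage). 'weights' is a
 * hypothetical result array indexed by parameter position. */
static void calc_all_param_weights(ir_graph *irg, unsigned *weights)
{
	assure_irg_outs(irg);
	inc_irg_visited(irg);
	ir_node *args = get_irg_args(irg);
	for (int i = get_irn_n_outs(args); i-- > 0; ) {
		ir_node *arg = get_irn_out(args, i);
		if (is_Proj(arg))
			weights[get_Proj_proj(arg)] = calc_method_param_weight(arg);
	}
}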