/**
 * Test whether a node computes a constant expression.
 *
 * A node qualifies if it is a Const, SymConst or Unknown, a Conv of a
 * constant expression, or a non-fragile binary operation whose operands
 * are both constant expressions.
 *
 * @param n  the node to examine
 * @return   1 if @p n is a constant expression, 0 otherwise
 */
int is_irn_const_expression(ir_node *n)
{
	/* We are in danger iff an exception will arise.  TODO: be more precise,
	 * for instance a Div will NOT trap if the divisor is known != 0. */
	if (is_binop(n) && !is_fragile_op(n)) {
		return is_irn_const_expression(get_binop_left(n))
		    && is_irn_const_expression(get_binop_right(n));
	}

	unsigned const opcode = get_irn_opcode(n);
	if (opcode == iro_Const || opcode == iro_SymConst || opcode == iro_Unknown)
		return 1;
	if (opcode == iro_Conv) {
		/* a conversion is constant iff its operand is */
		return is_irn_const_expression(get_irn_n(n, 0));
	}
	return 0;
}
/**
 * Fill @p addr with an x86 address-mode description matched from @p node.
 *
 * Tries to fold immediates, a base, an index and a scaled index (Shl) from
 * the address computation @p node into @p addr, producing the richest
 * addressing mode permitted by @p flags.  On return, addr->variant records
 * which combination (immediate / base / index / scale) was chosen.
 *
 * @param addr   descriptor to fill; fields are written, not fully reset
 *               here — presumably callers zero-initialize it (TODO confirm)
 * @param flags  x86_create_am_force forces folding even of nodes normally
 *               kept as a base; x86_create_am_double_use relaxes the
 *               multi-user restriction
 */
void x86_create_address_mode(x86_address_t *addr, ir_node *node, x86_create_am_flags_t flags)
{
	addr->imm.kind = X86_IMM_VALUE;

	/* The whole node may already be a single immediate (possibly
	 * RIP-relative). */
	if (eat_immediate(addr, node, true)) {
		addr->variant = addr->ip_base ? X86_ADDR_RIP : X86_ADDR_JUST_IMM;
		return;
	}

	assert(!addr->ip_base);
	/* Unless forced, keep nodes that must not be folded into an address
	 * mode (or that have too many users to duplicate) as a plain base. */
	if (!(flags & x86_create_am_force) && x86_is_non_address_mode_node(node)
	    && (!(flags & x86_create_am_double_use) || get_irn_n_edges(node) > 2)) {
		addr->variant = X86_ADDR_BASE;
		addr->base = node;
		return;
	}

	/* Strip immediate parts off the node; if that changed anything,
	 * re-check whether the remainder still qualifies for folding. */
	ir_node *eat_imms = eat_immediates(addr, node, flags, false);
	if (eat_imms != node) {
		if (flags & x86_create_am_force)
			eat_imms = be_skip_downconv(eat_imms, true);

		node = eat_imms;
		if (x86_is_non_address_mode_node(node)) {
			addr->variant = X86_ADDR_BASE;
			addr->base = node;
			return;
		}
	}

	/* starting point Add, Sub or Shl, FrameAddr */
	if (is_Shl(node)) {
		/* We don't want to eat add x, x as shl here, so only test for real Shl
		 * instructions, because we want the former as Lea x, x, not Shl x, 1 */
		if (eat_shl(addr, node)) {
			addr->variant = X86_ADDR_INDEX;
			return;
		}
	} else if (eat_immediate(addr, node, true)) {
		/* we can hit this case in x86_create_am_force mode */
		addr->variant = addr->ip_base ? X86_ADDR_RIP : X86_ADDR_JUST_IMM;
		return;
	} else if (is_Add(node)) {
		ir_node *left  = get_Add_left(node);
		ir_node *right = get_Add_right(node);
		if (flags & x86_create_am_force) {
			left  = be_skip_downconv(left, true);
			right = be_skip_downconv(right, true);
		}
		left  = eat_immediates(addr, left, flags, false);
		right = eat_immediates(addr, right, flags, false);

		/* Try to use either operand as a scaled index. */
		if (eat_shl(addr, left)) {
			left = NULL;
		} else if (eat_shl(addr, right)) {
			right = NULL;
		}

		/* (x & 0xFFFFFFFC) + (x >> 2) -> lea(x >> 2, x >> 2, 4) */
		if (left != NULL && right != NULL) {
			ir_node *and;
			ir_node *shr;
			if (is_And(left) && (is_Shr(right) || is_Shrs(right))) {
				and = left;
				shr = right;
				goto tryit;
			}
			if (is_And(right) && (is_Shr(left) || is_Shrs(left))) {
				and = right;
				shr = left;
				/* NOTE: the label below is jumped into from the symmetric
				 * case above — both And/Shr orderings share this body. */
tryit:
				/* Both operations must work on the same value. */
				if (get_And_left(and) == get_binop_left(shr)) {
					ir_node *and_right = get_And_right(and);
					ir_node *shr_right = get_binop_right(shr);
					if (is_Const(and_right) && is_Const(shr_right)) {
						ir_tarval *and_mask     = get_Const_tarval(and_right);
						ir_tarval *shift_amount = get_Const_tarval(shr_right);
						ir_mode   *mode         = get_irn_mode(and);
						ir_tarval *all_one      = get_mode_all_one(mode);
						/* The mask that (x >> c) << c would leave behind; the
						 * And is redundant exactly when its mask equals it. */
						ir_tarval *shift_mask   = tarval_shl(tarval_shr(all_one, shift_amount), shift_amount);
						long       val          = get_tarval_long(shift_amount);

						/* scale 0..3 corresponds to x86 factors 1,2,4,8 */
						if (and_mask == shift_mask && val >= 0 && val <= 3) {
							addr->variant = X86_ADDR_BASE_INDEX;
							addr->base    = shr;
							addr->index   = shr;
							addr->scale   = val;
							return;
						}
					}
				}
			}
		}

		/* Distribute the surviving operands onto base/index slots. */
		if (left != NULL) {
			ir_node *base = addr->base;
			if (base == NULL) {
				addr->variant = addr->index != NULL ? X86_ADDR_BASE_INDEX
				                                    : X86_ADDR_BASE;
				addr->base    = left;
			} else {
				addr->variant = X86_ADDR_BASE_INDEX;
				assert(addr->index == NULL && addr->scale == 0);
				assert(right == NULL);
				/* esp must be used as base */
				if (is_Proj(left) && is_Start(get_Proj_pred(left))) {
					addr->index = base;
					addr->base  = left;
				} else {
					addr->index = left;
				}
			}
		}
		if (right != NULL) {
			ir_node *base = addr->base;
			if (base == NULL) {
				addr->variant = addr->index != NULL ? X86_ADDR_BASE_INDEX
				                                    : X86_ADDR_BASE;
				addr->base    = right;
			} else {
				addr->variant = X86_ADDR_BASE_INDEX;
				assert(addr->index == NULL && addr->scale == 0);
				/* esp must be used as base */
				if (is_Proj(right) && is_Start(get_Proj_pred(right))) {
					addr->index = base;
					addr->base  = right;
				} else {
					addr->index = right;
				}
			}
		}
		return;
	}

	/* Fallback: use the whole node as base register. */
	addr->variant = X86_ADDR_BASE;
	addr->base    = node;
}
/**
 * Compute the weight of a method parameter.
 *
 * The weight estimates how profitable it would be to know this parameter as
 * a constant: uses in Cmp, indirect Calls, switch Conds and binary ops all
 * add to it.  Recurses through Id nodes, Tuple/Proj chains and binops with
 * a constant other operand; already-visited successors are skipped via the
 * visited flag set below.
 *
 * @param arg  The parameter whose weight must be computed.
 * @return the accumulated weight
 */
static unsigned calc_method_param_weight(ir_node *arg)
{
	/* We mark the nodes to avoid endless recursion */
	mark_irn_visited(arg);

	unsigned weight = null_weight;
	for (int i = get_irn_n_outs(arg); i-- > 0; ) {
		ir_node *succ = get_irn_out(arg, i);
		if (irn_visited(succ))
			continue;

		/* We should not walk over the memory edge. */
		if (get_irn_mode(succ) == mode_M)
			continue;

		switch (get_irn_opcode(succ)) {
		case iro_Call:
			if (get_Call_ptr(succ) == arg) {
				/* the argument is used as the pointer input of a Call: we
				   could probably turn an indirect Call into a direct one. */
				weight += indirect_call_weight;
			}
			break;
		case iro_Cmp: {
			/* We have reached a Cmp: comparing against a constant operand
			   is weighted higher than against a non-constant one. */
			ir_node *op;
			if (get_Cmp_left(succ) == arg)
				op = get_Cmp_right(succ);
			else
				op = get_Cmp_left(succ);

			if (is_irn_constlike(op)) {
				weight += const_cmp_weight;
			} else
				weight += cmp_weight;
			break;
		}
		case iro_Cond:
			/* the argument is used for a SwitchCond, a big win */
			weight += const_cmp_weight * get_irn_n_outs(succ);
			break;
		case iro_Id:
			/* when looking backward we might find Id nodes */
			weight += calc_method_param_weight(succ);
			break;
		case iro_Tuple:
			/* unoptimized tuple */
			for (int j = get_Tuple_n_preds(succ); j-- > 0; ) {
				ir_node *pred = get_Tuple_pred(succ, j);
				if (pred == arg) {
					/* look for Proj(j) */
					for (int k = get_irn_n_outs(succ); k-- > 0; ) {
						ir_node *succ_succ = get_irn_out(succ, k);
						if (is_Proj(succ_succ)) {
							if (get_Proj_proj(succ_succ) == j) {
								/* found: continue the walk behind the Proj */
								weight += calc_method_param_weight(succ_succ);
							}
						} else {
							/* this should NOT happen */
						}
					}
				}
			}
			break;
		default:
			if (is_binop(succ)) {
				/* We have reached a BinOp.  If the other operand is a
				   constant, add const_binop_weight and recurse (the result
				   would be constant-foldable too); otherwise just add
				   binop_weight. */
				ir_node *op;
				if (get_binop_left(succ) == arg)
					op = get_binop_right(succ);
				else
					op = get_binop_left(succ);

				if (is_irn_constlike(op)) {
					weight += const_binop_weight;
					weight += calc_method_param_weight(succ);
				} else
					weight += binop_weight;
			} else if (get_irn_arity(succ) == 1) {
				/* We have reached a unary op: its result stays constant if
				   the argument is, so add const_binop_weight and recurse. */
				weight += const_binop_weight;
				weight += calc_method_param_weight(succ);
			}
			break;
		}
	}
	/* NOTE(review): the link is cleared here but never set in this
	   function — presumably defensive or a leftover; confirm against the
	   callers' use of the link field. */
	set_irn_link(arg, NULL);
	return weight;
}