/**
 * Fold immediate operands reachable from @p node into the address mode.
 *
 * Walks Add chains, swallowing any operand that eat_immediate() accepts,
 * and resolves frame Members into the immediate entity.
 *
 * @param addr           the address mode data so far
 * @param node           the node to fold
 * @param flags          address-mode creation flags
 * @param basereg_usable whether the base register is still free
 *
 * @return the remaining (folded) node
 */
static ir_node *eat_immediates(x86_address_t *addr, ir_node *node,
                               x86_create_am_flags_t flags,
                               bool basereg_usable)
{
	/* Unless folding is forced, do not descend into nodes that must stay
	 * outside an address mode (respecting the double-use allowance). */
	if (!(flags & x86_create_am_force)
	    && x86_is_non_address_mode_node(node)
	    && (!(flags & x86_create_am_double_use) || get_irn_n_edges(node) > 2))
		return node;

	if (is_Add(node)) {
		ir_node *const lhs = get_Add_left(node);
		ir_node *const rhs = get_Add_right(node);
		/* If one summand is an immediate, continue folding in the other. */
		if (eat_immediate(addr, lhs, basereg_usable))
			return eat_immediates(addr, rhs, x86_create_am_normal,
			                      basereg_usable);
		if (eat_immediate(addr, rhs, basereg_usable))
			return eat_immediates(addr, lhs, x86_create_am_normal,
			                      basereg_usable);
		return node;
	}

	if (is_Member(node)) {
		/* A frame member becomes the immediate entity of the address. */
		assert(addr->imm.entity == NULL);
		addr->imm.entity = get_Member_entity(node);
		addr->imm.kind   = X86_IMM_FRAMEENT;
		ir_node *const base = get_Member_ptr(node);
		/* the member pointer must be the frame (a Proj of Start) */
		assert(is_Start(get_Proj_pred(base)));
		return base;
	}

	return node;
}
/**
 * lower 64bit addition: an 32bit add for the lower parts, an add with
 * carry for the higher parts. If the carry's value is known, fold it
 * into the upper add.
 *
 * @param node the 64bit Add node to lower
 * @param mode the (lowered) mode for the high word result
 */
static void ia32_lower_add64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbg        = get_irn_dbg_info(node);
	ir_node  *block      = get_nodes_block(node);
	ir_node  *left       = get_Add_left(node);
	ir_node  *right      = get_Add_right(node);
	/* fetch the already-lowered word halves of both operands */
	ir_node  *left_low   = get_lowered_low(left);
	ir_node  *left_high  = get_lowered_high(left);
	ir_node  *right_low  = get_lowered_low(right);
	ir_node  *right_high = get_lowered_high(right);
	ir_mode  *low_mode   = get_irn_mode(left_low);
	ir_mode  *high_mode  = get_irn_mode(left_high);
	/* query whether the low-word carry is statically known */
	carry_result cr      = lower_add_carry(left, right, low_mode);

	assert(get_irn_mode(left_low) == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (cr == no_carry) {
		/* no carry possible: two independent plain adds suffice */
		ir_node *add_low  = new_rd_Add(dbg, block, left_low, right_low,
		                               low_mode);
		ir_node *add_high = new_rd_Add(dbg, block, left_high, right_high,
		                               high_mode);
		ir_set_dw_lowered(node, add_low, add_high);
	} else if (cr == must_carry
	           && (is_Const(left_high) || is_Const(right_high))) {
		// We cannot assume that left_high and right_high form a normalized Add.
		ir_node *constant;
		ir_node *other;
		if (is_Const(left_high)) {
			constant = left_high;
			other    = right_high;
		} else {
			constant = right_high;
			other    = left_high;
		}
		/* carry is guaranteed: fold the +1 into the constant high word */
		ir_graph *irg            = get_irn_irg(right_high);
		ir_node  *one            = new_rd_Const(dbg, irg,
		                                        get_mode_one(high_mode));
		ir_node  *const_plus_one = new_rd_Add(dbg, block, constant, one,
		                                      high_mode);
		ir_node  *add_high       = new_rd_Add(dbg, block, other,
		                                      const_plus_one, high_mode);
		ir_node  *add_low        = new_rd_Add(dbg, block, left_low,
		                                      right_low, low_mode);
		ir_set_dw_lowered(node, add_low, add_high);
	} else {
		/* l_res = a_l + b_l */
		ir_node *add_low    = new_bd_ia32_l_Add(dbg, block, left_low,
		                                        right_low);
		ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node *res_low    = new_r_Proj(add_low, ia32_mode_gp,
		                                 pn_ia32_l_Add_res);
		ir_node *flags      = new_r_Proj(add_low, mode_flags,
		                                 pn_ia32_l_Add_flags);
		/* h_res = a_h + b_h + carry */
		ir_node *add_high   = new_bd_ia32_l_Adc(dbg, block, left_high,
		                                        right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, add_high);
	}
}
/**
 * Try to interpret @p node entirely as an immediate and record it in the
 * address mode.
 *
 * @param addr           the address mode data so far
 * @param node           the candidate node
 * @param basereg_usable whether the base register may be claimed (needed
 *                       for IP-relative relocations)
 *
 * @return true if the node was completely absorbed into the immediate
 */
static bool eat_imm(x86_address_t *const addr, ir_node const *const node,
                    bool basereg_usable)
{
	if (is_Add(node)) {
		/* Add is supported as long as both operands are immediates. */
		return !x86_is_non_address_mode_node(node)
		    && eat_imm(addr, get_Add_left(node), basereg_usable)
		    && eat_imm(addr, get_Add_right(node), basereg_usable);
	}

	if (is_Address(node)) {
		/* The first Address of a DAG can be folded into an immediate. */
		if (addr->imm.entity)
			return false;
		addr->imm.entity = get_Address_entity(node);
		addr->imm.kind   = X86_IMM_ADDR;
		if (is_tls_entity(addr->imm.entity))
			addr->tls_segment = true;
		return true;
	}

	if (is_Const(node)) {
		/* Add the value to the offset. */
		ir_tarval *const value = get_Const_tarval(node);
		if (!tarval_possible(value))
			return false;
		addr->imm.offset += get_tarval_long(value);
		return true;
	}

	if (is_Unknown(node)) {
		/* Use '0' for Unknowns. */
		return true;
	}

	if (be_is_Relocation(node)) {
		if (addr->imm.entity)
			return false;
		addr->imm.entity = be_get_Relocation_entity(node);
		x86_immediate_kind_t const kind
			= (x86_immediate_kind_t)be_get_Relocation_kind(node);
		addr->imm.kind = kind;
		if (kind == X86_IMM_GOTPCREL || kind == X86_IMM_PCREL) {
			/* PC-relative immediates need the instruction pointer as base */
			if (!basereg_usable)
				return false;
			addr->ip_base = true;
		}
		return true;
	}

	/* All other nodes are no immediates. */
	return false;
}
/**
 * Transforms an Add into the appropriate soft float function.
 *
 * @param n the Add node
 * @return true if the node was a float Add and has been replaced
 */
static bool lower_Add(ir_node *const n)
{
	ir_mode *const mode = get_irn_mode(n);
	if (!mode_is_float(mode))
		return false;

	/* build the runtime call "add" with both operands as arguments */
	ir_node *const in[]   = { get_Add_left(n), get_Add_right(n) };
	ir_node *const call   = make_softfloat_call(n, "add", ARRAY_SIZE(in), in);
	exchange(n, call);
	return true;
}
/**
 * Try to place a Shl into an address mode.
 *
 * @param addr the address mode data so far
 * @param node the node to place
 * @return true on success
 */
static bool eat_shl(x86_address_t *addr, ir_node *node)
{
	/* scale and index may only be claimed once */
	if (addr->scale != 0 || addr->index != NULL)
		return false;

	long     scale;
	ir_node *scaled;
	if (is_Shl(node)) {
		/* a shl by a constant 0..3 maps directly onto the scale field */
		ir_node *amount = get_Shl_right(node);
		if (!is_Const(amount))
			return false;
		ir_tarval *tv = get_Const_tarval(amount);
		if (!tarval_is_long(tv))
			return false;
		scale = get_tarval_long(tv);
		if (scale < 0 || scale > 3)
			return false;
		if (scale == 0)
			be_warningf(node, "found unoptimized Shl x,0");
		scaled = get_Shl_left(node);
	} else if (is_Add(node)) {
		/* add x, x is equivalent to a shift by one */
		ir_node *lhs = get_Add_left(node);
		if (lhs != get_Add_right(node))
			return false;
		if (is_Const(lhs))
			return false;
		scale  = 1;
		scaled = lhs;
	} else {
		return false;
	}

	if (x86_is_non_address_mode_node(node))
		return false;
	addr->scale = scale;
	addr->index = scaled;
	return true;
}
/**
 * Lower a 64bit Add: an AddS for the low words producing flags, then an
 * AdC (add with carry) consuming those flags for the high words.
 *
 * @param node the 64bit Add node to lower
 * @param mode the mode for the high word result
 */
static void lower64_add(ir_node *node, ir_mode *mode)
{
	dbg_info *dbgi   = get_irn_dbg_info(node);
	ir_node  *block  = get_nodes_block(node);
	ir_node  *lhs    = get_Add_left(node);
	ir_node  *rhs    = get_Add_right(node);
	ir_node  *lhs_lo = get_lowered_low(lhs);
	ir_node  *lhs_hi = get_lowered_high(lhs);
	ir_node  *rhs_lo = get_lowered_low(rhs);
	ir_node  *rhs_hi = get_lowered_high(rhs);

	/* low words: flag-setting add */
	ir_node *adds    = new_bd_arm_AddS_t(dbgi, block, lhs_lo, rhs_lo);
	ir_mode *lo_mode = get_irn_mode(lhs_lo);
	ir_node *res_lo  = new_r_Proj(adds, lo_mode, pn_arm_AddS_t_res);
	ir_node *carry   = new_r_Proj(adds, mode_ANY, pn_arm_AddS_t_flags);
	/* high words: add with the carry produced above */
	ir_node *adc     = new_bd_arm_AdC_t(dbgi, block, lhs_hi, rhs_hi, carry,
	                                    mode);
	ir_set_dw_lowered(node, res_lo, adc);
}
/**
 * Copy a constant expression DAG rooted at @p n into @p block.
 *
 * Recursively rebuilds the supported constant-expression node kinds
 * (Const, SymConst, arithmetic/bitwise ops, Conv, Minus, Not, Unknown).
 *
 * @param dbg   debug info to attach to the copies
 * @param n     root of the constant expression to copy
 * @param block the target block for the new nodes
 *
 * @return the copied node; panics on unsupported opcodes
 */
ir_node *copy_const_value(dbg_info *dbg, ir_node *n, ir_node *block)
{
	ir_graph *irg  = get_irn_irg(block);
	/* @@@ GL I think we should implement this using the routines from irgopt
	 * for dead node elimination/inlineing. */
	ir_mode  *mode = get_irn_mode(n);
	ir_node  *res;

	switch (get_irn_opcode(n)) {
	case iro_Const:
		res = new_rd_Const(dbg, irg, get_Const_tarval(n));
		break;
	case iro_SymConst:
		res = new_rd_SymConst(dbg, irg, get_irn_mode(n),
		                      get_SymConst_symbol(n), get_SymConst_kind(n));
		break;
	/* binary operations: copy both operands first */
	case iro_Add:
		res = new_rd_Add(dbg, block,
		                 copy_const_value(dbg, get_Add_left(n), block),
		                 copy_const_value(dbg, get_Add_right(n), block),
		                 mode);
		break;
	case iro_Sub:
		res = new_rd_Sub(dbg, block,
		                 copy_const_value(dbg, get_Sub_left(n), block),
		                 copy_const_value(dbg, get_Sub_right(n), block),
		                 mode);
		break;
	case iro_Mul:
		res = new_rd_Mul(dbg, block,
		                 copy_const_value(dbg, get_Mul_left(n), block),
		                 copy_const_value(dbg, get_Mul_right(n), block),
		                 mode);
		break;
	case iro_And:
		res = new_rd_And(dbg, block,
		                 copy_const_value(dbg, get_And_left(n), block),
		                 copy_const_value(dbg, get_And_right(n), block),
		                 mode);
		break;
	case iro_Or:
		res = new_rd_Or(dbg, block,
		                copy_const_value(dbg, get_Or_left(n), block),
		                copy_const_value(dbg, get_Or_right(n), block),
		                mode);
		break;
	case iro_Eor:
		res = new_rd_Eor(dbg, block,
		                 copy_const_value(dbg, get_Eor_left(n), block),
		                 copy_const_value(dbg, get_Eor_right(n), block),
		                 mode);
		break;
	/* unary operations: copy the single operand */
	case iro_Conv:
		res = new_rd_Conv(dbg, block,
		                  copy_const_value(dbg, get_Conv_op(n), block),
		                  mode);
		break;
	case iro_Minus:
		res = new_rd_Minus(dbg, block,
		                   copy_const_value(dbg, get_Minus_op(n), block),
		                   mode);
		break;
	case iro_Not:
		res = new_rd_Not(dbg, block,
		                 copy_const_value(dbg, get_Not_op(n), block),
		                 mode);
		break;
	case iro_Unknown:
		res = new_r_Unknown(irg, mode);
		break;
	default:
		panic("opcode invalid or not implemented %+F", n);
	}
	return res;
}
/**
 * Build an x86 address mode for @p node, filling in @p addr (variant,
 * base, index, scale and immediate).
 *
 * Tries, in order: a pure immediate, a non-foldable node as plain base,
 * folding immediates out of the tree, a Shl as scaled index, and finally
 * decomposing an Add into base/index (with special patterns for
 * (x & mask) + (x >> c) and for keeping esp usable as base).
 *
 * @param addr  the address mode data to fill (partially pre-initialized)
 * @param node  the address computation to match
 * @param flags address-mode creation flags
 */
void x86_create_address_mode(x86_address_t *addr, ir_node *node,
                             x86_create_am_flags_t flags)
{
	addr->imm.kind = X86_IMM_VALUE;
	/* the whole tree may already be a single immediate */
	if (eat_immediate(addr, node, true)) {
		addr->variant = addr->ip_base ? X86_ADDR_RIP : X86_ADDR_JUST_IMM;
		return;
	}

	assert(!addr->ip_base);
	/* nodes that must stay outside address modes become a plain base */
	if (!(flags & x86_create_am_force)
	    && x86_is_non_address_mode_node(node)
	    && (!(flags & x86_create_am_double_use)
	        || get_irn_n_edges(node) > 2)) {
		addr->variant = X86_ADDR_BASE;
		addr->base    = node;
		return;
	}

	/* strip immediate summands off the tree */
	ir_node *eat_imms = eat_immediates(addr, node, flags, false);
	if (eat_imms != node) {
		if (flags & x86_create_am_force)
			eat_imms = be_skip_downconv(eat_imms, true);
		node = eat_imms;
		if (x86_is_non_address_mode_node(node)) {
			addr->variant = X86_ADDR_BASE;
			addr->base    = node;
			return;
		}
	}

	/* starting point Add, Sub or Shl, FrameAddr */
	if (is_Shl(node)) {
		/* We don't want to eat add x, x as shl here, so only test for real Shl
		 * instructions, because we want the former as Lea x, x, not Shl x, 1 */
		if (eat_shl(addr, node)) {
			addr->variant = X86_ADDR_INDEX;
			return;
		}
	} else if (eat_immediate(addr, node, true)) {
		/* we can hit this case in x86_create_am_force mode */
		addr->variant = addr->ip_base ? X86_ADDR_RIP : X86_ADDR_JUST_IMM;
		return;
	} else if (is_Add(node)) {
		ir_node *left  = get_Add_left(node);
		ir_node *right = get_Add_right(node);

		if (flags & x86_create_am_force) {
			left  = be_skip_downconv(left, true);
			right = be_skip_downconv(right, true);
		}
		left  = eat_immediates(addr, left, flags, false);
		right = eat_immediates(addr, right, flags, false);

		/* prefer turning one summand into a scaled index */
		if (eat_shl(addr, left)) {
			left = NULL;
		} else if (eat_shl(addr, right)) {
			right = NULL;
		}

		/* (x & 0xFFFFFFFC) + (x >> 2) -> lea(x >> 2, x >> 2, 4) */
		if (left != NULL && right != NULL) {
			ir_node *and;
			ir_node *shr;
			if (is_And(left) && (is_Shr(right) || is_Shrs(right))) {
				and = left;
				shr = right;
				goto tryit;
			}
			if (is_And(right) && (is_Shr(left) || is_Shrs(left))) {
				and = right;
				shr = left;
				/* NOTE: the goto above jumps into this body */
tryit:
				if (get_And_left(and) == get_binop_left(shr)) {
					ir_node *and_right = get_And_right(and);
					ir_node *shr_right = get_binop_right(shr);
					if (is_Const(and_right) && is_Const(shr_right)) {
						ir_tarval *and_mask     = get_Const_tarval(and_right);
						ir_tarval *shift_amount = get_Const_tarval(shr_right);
						ir_mode   *mode         = get_irn_mode(and);
						ir_tarval *all_one      = get_mode_all_one(mode);
						/* the mask the shift implicitly applies:
						 * low shift_amount bits cleared */
						ir_tarval *shift_mask   = tarval_shl(
							tarval_shr(all_one, shift_amount), shift_amount);
						long       val          = get_tarval_long(shift_amount);
						if (and_mask == shift_mask && val >= 0 && val <= 3) {
							addr->variant = X86_ADDR_BASE_INDEX;
							addr->base    = shr;
							addr->index   = shr;
							addr->scale   = val;
							return;
						}
					}
				}
			}
		}

		/* place remaining summands as base and/or index */
		if (left != NULL) {
			ir_node *base = addr->base;
			if (base == NULL) {
				addr->variant = addr->index != NULL ? X86_ADDR_BASE_INDEX
				                                    : X86_ADDR_BASE;
				addr->base    = left;
			} else {
				addr->variant = X86_ADDR_BASE_INDEX;
				assert(addr->index == NULL && addr->scale == 0);
				assert(right == NULL);
				/* esp must be used as base */
				if (is_Proj(left) && is_Start(get_Proj_pred(left))) {
					addr->index = base;
					addr->base  = left;
				} else {
					addr->index = left;
				}
			}
		}
		if (right != NULL) {
			ir_node *base = addr->base;
			if (base == NULL) {
				addr->variant = addr->index != NULL ? X86_ADDR_BASE_INDEX
				                                    : X86_ADDR_BASE;
				addr->base    = right;
			} else {
				addr->variant = X86_ADDR_BASE_INDEX;
				assert(addr->index == NULL && addr->scale == 0);
				/* esp must be used as base */
				if (is_Proj(right) && is_Start(get_Proj_pred(right))) {
					addr->index = base;
					addr->base  = right;
				} else {
					addr->index = right;
				}
			}
		}
		return;
	}

	/* fallback: the whole node is the base */
	addr->variant = X86_ADDR_BASE;
	addr->base    = node;
}