/**
 * Checks whether node @p high is a sign extension of @p low.
 *
 * Recognizes two shapes:
 *  - high == (x >>s 31) where x and low are the same value, possibly with a
 *    Conv on either side
 *  - both are constants and high is the sign-fill (0 or -1) matching low's sign
 */
static bool is_sign_extend(ir_node *low, ir_node *high)
{
	if (is_Shrs(high)) {
		/* high must be an arithmetic shift by exactly 31 (copies the sign bit). */
		ir_node *amount_node = get_Shrs_right(high);
		if (!is_Const(amount_node))
			return false;
		ir_tarval *amount = get_Const_tarval(amount_node);
		if (!tarval_is_long(amount))
			return false;
		if (get_tarval_long(amount) != 31)
			return false;

		/* A Conv may sit on either side of the shifted value. */
		ir_node *shifted = get_Shrs_left(high);
		if (is_Conv(low) && get_Conv_op(low) == shifted)
			return true;
		return is_Conv(shifted) && get_Conv_op(shifted) == low;
	}

	if (is_Const(low) && is_Const(high)) {
		/* Constant pair: high must be all zeros for non-negative low,
		 * all ones for negative low. */
		ir_tarval *tv_low  = get_Const_tarval(low);
		ir_tarval *tv_high = get_Const_tarval(high);
		if (tarval_is_long(tv_high) && tarval_is_long(tv_low)) {
			long lo = get_tarval_long(tv_low);
			long hi = get_tarval_long(tv_high);
			return (hi == 0 && lo >= 0) || (hi == -1 && lo < 0);
		}
	}
	return false;
}
/**
 * lower 64bit addition: an 32bit add for the lower parts, an add with
 * carry for the higher parts. If the carry's value is known, fold it
 * into the upper add.
 */
static void ia32_lower_add64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbg        = get_irn_dbg_info(node);
	ir_node  *block      = get_nodes_block(node);
	ir_node  *left       = get_Add_left(node);
	ir_node  *right      = get_Add_right(node);
	/* operands were already split into 32bit low/high parts */
	ir_node  *left_low   = get_lowered_low(left);
	ir_node  *left_high  = get_lowered_high(left);
	ir_node  *right_low  = get_lowered_low(right);
	ir_node  *right_high = get_lowered_high(right);
	ir_mode  *low_mode   = get_irn_mode(left_low);
	ir_mode  *high_mode  = get_irn_mode(left_high);
	/* try to determine statically whether the low add produces a carry */
	carry_result cr      = lower_add_carry(left, right, low_mode);

	assert(get_irn_mode(left_low) == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (cr == no_carry) {
		/* no carry possible: two independent 32bit adds suffice */
		ir_node *add_low  = new_rd_Add(dbg, block, left_low, right_low, low_mode);
		ir_node *add_high = new_rd_Add(dbg, block, left_high, right_high, high_mode);
		ir_set_dw_lowered(node, add_low, add_high);
	} else if (cr == must_carry && (is_Const(left_high) || is_Const(right_high))) {
		/* carry is certain and one high part is a constant: fold the carry
		 * into the constant (constant + 1) so no adc is needed. */
		// We cannot assume that left_high and right_high form a normalized Add.
		ir_node *constant;
		ir_node *other;
		if (is_Const(left_high)) {
			constant = left_high;
			other    = right_high;
		} else {
			constant = right_high;
			other    = left_high;
		}
		ir_graph *irg            = get_irn_irg(right_high);
		ir_node  *one            = new_rd_Const(dbg, irg, get_mode_one(high_mode));
		ir_node  *const_plus_one = new_rd_Add(dbg, block, constant, one, high_mode);
		ir_node  *add_high       = new_rd_Add(dbg, block, other, const_plus_one, high_mode);
		ir_node  *add_low        = new_rd_Add(dbg, block, left_low, right_low, low_mode);
		ir_set_dw_lowered(node, add_low, add_high);
	} else {
		/* carry state unknown: feed the flags output of the low add into an
		 * add-with-carry for the high parts */
		/* l_res = a_l + b_l */
		ir_node *add_low    = new_bd_ia32_l_Add(dbg, block, left_low, right_low);
		ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node *res_low    = new_r_Proj(add_low, ia32_mode_gp, pn_ia32_l_Add_res);
		ir_node *flags      = new_r_Proj(add_low, mode_flags, pn_ia32_l_Add_flags);

		/* h_res = a_h + b_h + carry */
		ir_node *add_high = new_bd_ia32_l_Adc(dbg, block, left_high, right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, add_high);
	}
}
/**
 * lower 64bit subtraction: a 32bit sub for the lower parts, a sub
 * with borrow for the higher parts. If the borrow's value is known,
 * fold it into the upper sub.
 */
static void ia32_lower_sub64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbg        = get_irn_dbg_info(node);
	ir_node  *block      = get_nodes_block(node);
	ir_node  *left       = get_Sub_left(node);
	ir_node  *right      = get_Sub_right(node);
	/* operands were already split into 32bit low/high parts */
	ir_node  *left_low   = get_lowered_low(left);
	ir_node  *left_high  = get_lowered_high(left);
	ir_node  *right_low  = get_lowered_low(right);
	ir_node  *right_high = get_lowered_high(right);
	ir_mode  *low_mode   = get_irn_mode(left_low);
	ir_mode  *high_mode  = get_irn_mode(left_high);
	/* try to determine statically whether the low sub produces a borrow */
	carry_result cr      = lower_sub_borrow(left, right, low_mode);

	assert(get_irn_mode(left_low) == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (cr == no_carry) {
		/* no borrow possible: two independent 32bit subs suffice */
		ir_node *sub_low  = new_rd_Sub(dbg, block, left_low, right_low, low_mode);
		ir_node *sub_high = new_rd_Sub(dbg, block, left_high, right_high, high_mode);
		ir_set_dw_lowered(node, sub_low, sub_high);
	} else if (cr == must_carry && (is_Const(left_high) || is_Const(right_high))) {
		/* borrow is certain and one high part is a constant: fold the borrow
		 * into the constant so no sbb is needed. */
		ir_node  *sub_high;
		ir_graph *irg = get_irn_irg(right_high);
		ir_node  *one = new_rd_Const(dbg, irg, get_mode_one(high_mode));
		if (is_Const(right_high)) {
			/* a_h - b_h - 1  ==  a_h - (b_h + 1) */
			ir_node *new_const = new_rd_Add(dbg, block, right_high, one, high_mode);
			sub_high = new_rd_Sub(dbg, block, left_high, new_const, high_mode);
		} else if (is_Const(left_high)) {
			/* a_h - b_h - 1  ==  (a_h - 1) - b_h */
			ir_node *new_const = new_rd_Sub(dbg, block, left_high, one, high_mode);
			sub_high = new_rd_Sub(dbg, block, new_const, right_high, high_mode);
		} else {
			panic("logic error");
		}
		ir_node *sub_low = new_rd_Sub(dbg, block, left_low, right_low, low_mode);
		ir_set_dw_lowered(node, sub_low, sub_high);
	} else {
		/* borrow state unknown: feed the flags output of the low sub into a
		 * subtract-with-borrow for the high parts */
		/* l_res = a_l - b_l */
		ir_node *sub_low    = new_bd_ia32_l_Sub(dbg, block, left_low, right_low);
		ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node *res_low    = new_r_Proj(sub_low, ia32_mode_gp, pn_ia32_l_Sub_res);
		ir_node *flags      = new_r_Proj(sub_low, mode_flags, pn_ia32_l_Sub_flags);

		/* h_res = a_h - b_h - carry */
		ir_node *sub_high = new_bd_ia32_l_Sbb(dbg, block, left_high, right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, sub_high);
	}
}
/**
 * Try to place a Shl into an address mode.
 *
 * @param addr the address mode data so far
 * @param node the node to place
 * @return true on success
 */
static bool eat_shl(x86_address_t *addr, ir_node *node)
{
	/* A scale/index slot can only be consumed once. */
	if (addr->scale != 0 || addr->index != NULL)
		return false;

	long     scale;
	ir_node *scaled_val;
	if (is_Shl(node)) {
		/* address modes support shift amounts of 0, 1, 2 or 3 */
		ir_node *amount = get_Shl_right(node);
		if (!is_Const(amount))
			return false;
		ir_tarval *tv = get_Const_tarval(amount);
		if (!tarval_is_long(tv))
			return false;
		scale = get_tarval_long(tv);
		if (scale < 0 || scale > 3)
			return false;
		if (scale == 0)
			be_warningf(node, "found unoptimized Shl x,0");
		scaled_val = get_Shl_left(node);
	} else if (is_Add(node)) {
		/* x + x is equivalent to a shift by one */
		ir_node *lhs = get_Add_left(node);
		ir_node *rhs = get_Add_right(node);
		if (lhs != rhs)
			return false;
		if (is_Const(lhs))
			return false;
		scale      = 1;
		scaled_val = lhs;
	} else {
		return false;
	}

	if (x86_is_non_address_mode_node(node))
		return false;

	addr->scale = scale;
	addr->index = scaled_val;
	return true;
}
/**
 * Simplifies a Conv node:
 *  - Conv to the operand's own mode is an identity -> replace by the operand.
 *  - Conv of a constant -> replace by a constant of the target mode.
 *  - Conv(Conv(x)) -> rewire to skip the inner Conv.
 */
void ConvHandler::cleanUp(Node node)
{
	if (is_Conv(node)) {
		Node child = node.getChild(0);
		// Same mode: the Conv changes nothing, drop it entirely.
		if (/*is_Conv(child) && */ node.getMode() == child.getMode())
			replaceNode(node, child);
		// Constant operand: fold the conversion into a new constant.
		else if (is_Const(child))
			replaceNode(node, new_r_Const_long(irg, node.getMode(), child.getTarval().getLong()));
		// Chained Conv: bypass the inner one.
		// NOTE(review): this assumes the inner Conv never changes the value in
		// a way the outer Conv cannot reproduce — for a narrowing chain like
		// Is -> Bu -> Is, dropping the inner Conv would change the result.
		// Confirm which mode chains can actually reach this point.
		else if (is_Conv(child))
			set_irn_n(node, 0, child.getChild(0));
	}
}
/**
 * Adjust the size of a node representing a stack alloc to a certain
 * stack_alignment.
 *
 * @param size  the node containing the non-aligned size
 * @param block the block where new nodes are allocated on
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(dbg_info *dbgi, ir_node *size, ir_node *block)
{
	/* No alignment requirement: nothing to do. */
	if (stack_alignment <= 1)
		return size;
	/* Leave constant sizes alone unless lowering of constant sizes
	 * was requested. */
	if (is_Const(size) && !lower_constant_sizes)
		return size;

	/* aligned = (size + (alignment - 1)) & -alignment */
	ir_mode   *mode     = get_irn_mode(size);
	ir_graph  *irg      = get_Block_irg(block);
	ir_tarval *tv_round = new_tarval_from_long(stack_alignment - 1, mode);
	ir_node   *round_up = new_r_Const(irg, tv_round);
	ir_node   *summed   = new_rd_Add(dbgi, block, size, round_up, mode);
	ir_tarval *tv_mask  = new_tarval_from_long(-(long)stack_alignment, mode);
	ir_node   *mask     = new_r_Const(irg, tv_mask);
	return new_rd_And(dbgi, block, summed, mask, mode);
}
/**
 * Initializes the worklist over all nodes of @p functionGraph.
 *
 * Each node's link field is seeded with an initial tarval (presumably the
 * bottom/top elements of a constant-propagation lattice — confirm against the
 * solver that consumes this worklist): Phis and ordinary nodes start at
 * tarval_unknown, numeric constants keep their own tarval, and nodes whose
 * value cannot be known statically (Call/Load/Start) get tarval_bad.
 * Phis are queued first, then all remaining nodes in topological order.
 */
Worklist::Worklist(ir_graph* functionGraph, GraphHandler& handler):
	functionGraph(functionGraph),
	handler(handler)
{
	// walk_topological expects a plain C function pointer, so the lambdas
	// below must stay capture-less to be convertible to ir_func.
	typedef void (*ir_func)(ir_node*, void*);

	// State shared with the callbacks through the void* env parameter.
	struct envMembers
	{
		std::queue<ir_node*>* pQueue;
		std::unordered_map<ir_node*, bool>* pIsQueued;
	};

	envMembers envInstance;
	envInstance.pQueue = &this->worklist;
	envInstance.pIsQueued = &this->isQueued;

	// Pass 1: queue every Phi, initialized to tarval_unknown.
	ir_func addPhis = [](ir_node * node, void* env)
	{
		if (is_Phi(node))
		{
			auto envInstance = (envMembers*)env;
			set_irn_link(node, (void*)tarval_unknown);
			envInstance->pQueue->push(node);
			(*envInstance->pIsQueued)[node] = true;
		}
	};

	// Pass 2: queue every non-Phi node with its initial tarval.
	ir_func addToWorklist = [](ir_node * node, void* env)
	{
		if (is_Phi(node)) return;

		auto envInstance = (envMembers*)env;

		ir_tarval* tarval;

		// Nodes whose value can never be determined statically.
		auto isBadNode = [&] (Node node) -> bool
		{
			if (is_Call(node) || is_Load(node) || is_Start(node)) return true;
			/*else if (Node(node).getChildCount() > 0)
			{
				for (Node child : Node(node).getChildren())
					if (child.getTarval() == tarval_bad) return true;
			}*/
			return false;
		};

		// TODO: Support other modes such as Bu, Lu
		if (is_Const(node) && Node(node).getTarval().isNumeric())
			tarval = get_Const_tarval(node);
		else if (isBadNode(Node(node)))
			tarval = tarval_bad;
		else
			tarval = tarval_unknown;

		set_irn_link(node, (void*)tarval);
		envInstance->pQueue->push(node);
		(*envInstance->pIsQueued)[node] = true;
	};

	walk_topological(functionGraph, addPhis, (void*)&envInstance);
	walk_topological(functionGraph, addToWorklist, (void*)&envInstance);
}
/**
 * Return the value of a Cmp if one or both predecessors
 * are Confirm nodes.
 *
 * @param cmp      the Cmp node
 * @param left     the left operand of the Cmp
 * @param right    the right operand of the Cmp
 * @param relation the compare relation
 *
 * @return tarval_b_true / tarval_b_false if the comparison could be decided,
 *         tarval_bad otherwise
 */
ir_tarval *computed_value_Cmp_Confirm(const ir_node *cmp, ir_node *left, ir_node *right, ir_relation relation)
{
	ir_node *l_bound;
	ir_relation l_relation, res_relation, neg_relation;
	interval_t l_iv, r_iv;
	ir_tarval *tv;

	if (is_Confirm(right)) {
		/* we want the Confirm on the left side */
		ir_node *t = right;
		right = left;
		left = t;
		relation = get_inversed_relation(relation);
	} else if (! is_Confirm(left)) {
		/* nothing more found */
		tv = tarval_bad;
		/* no Confirm at all: jump straight to the ==0 / !=0 special cases
		 * (the label sits inside the else branch below) */
		goto check_null_case;
	}

	/* ok, here at least left is a Confirm, right might be */
	l_bound = get_Confirm_bound(left);
	l_relation = get_Confirm_relation(left);

	if (is_Confirm(right)) {
		/*
		 * both sides are Confirm's. Check some rare cases first.
		 */
		ir_node *r_bound = get_Confirm_bound(right);
		ir_relation r_relation = get_Confirm_relation(right);

		/*
		 * some check can be made WITHOUT constant bounds
		 */
		if (r_bound == l_bound) {
			if (is_transitive(l_relation)) {
				ir_relation r_inc_relation = get_inversed_relation(r_relation);

				/*
				 * triangle inequality:
				 *
				 * a CMP B && B CMP b => a CMP b, !(a ~CMP b)
				 *
				 * We handle correctly cases with some <=/>= here
				 */
				if ((l_relation & ~ir_relation_equal) == (r_inc_relation & ~ir_relation_equal)) {
					res_relation = (l_relation & ~ir_relation_equal) | (l_relation & r_inc_relation & ir_relation_equal);

					if ((relation == res_relation) || ((relation & ~ir_relation_equal) == res_relation)) {
						DBG_OUT_TR(l_relation, l_bound, r_relation, r_bound, relation, "true");
						DBG_EVAL_CONFIRM(cmp);
						return tarval_b_true;
					} else {
						ir_relation neg_relation = get_negated_relation(relation);

						if ((neg_relation == res_relation) || ((neg_relation & ~ir_relation_equal) == res_relation)) {
							DBG_OUT_TR(l_relation, l_bound, r_relation, r_bound, relation, "false");
							DBG_EVAL_CONFIRM(cmp);
							return tarval_b_false;
						}
					}
				}
			}
		}

		/*
		 * Here, we check only the right Confirm, as the left Confirms are
		 * checked later anyway.
		 */
		if (left == r_bound) {
			/*
			 * l == bound(r) AND relation(r) == relation:
			 *
			 * We know that a CMP b and check for that
			 */
			if ((r_relation == relation) || (r_relation == (relation & ~ir_relation_equal))) {
				DBG_OUT_R(r_relation, r_bound, left, relation, right, "true");
				DBG_EVAL_CONFIRM(cmp);
				return tarval_b_true;
			}
			/*
			 * l == bound(r) AND relation(r) != relation:
			 *
			 * We know that a CMP b and check for a ~CMP b
			 */
			else {
				neg_relation = get_negated_relation(relation);

				if ((r_relation == neg_relation) || (r_relation == (neg_relation & ~ir_relation_equal))) {
					DBG_OUT_R(r_relation, r_bound, left, relation, right, "false");
					DBG_EVAL_CONFIRM(cmp);
					return tarval_b_false;
				}
			}
		}

		/* now, try interval magic */
		tv = compare_iv(
			get_interval(&l_iv, l_bound, l_relation),
			get_interval(&r_iv, r_bound, r_relation),
			relation);

		if (tv != tarval_bad) {
			DBG_EVAL_CONFIRM(cmp);
			return tv;
		}
	}

	/* from Here, check only left Confirm */
	/*
	 * some checks can be made WITHOUT constant bounds
	 */
	if (right == l_bound) {
		/*
		 * r == bound(l) AND relation(l) == relation:
		 *
		 * We know that a CMP b and check for that
		 */
		if ((l_relation == relation) || (l_relation == (relation & ~ir_relation_equal))) {
			DBG_OUT_L(l_relation, l_bound, left, relation, right, "true");
			DBG_EVAL_CONFIRM(cmp);
			return tarval_b_true;
		}
		/*
		 * r == bound(l) AND relation(l) is Not(relation):
		 *
		 * We know that a CMP b and check for a ~CMP b
		 */
		else {
			neg_relation = get_negated_relation(relation);

			if ((l_relation == neg_relation) || (l_relation == (neg_relation & ~ir_relation_equal))) {
				DBG_OUT_L(l_relation, l_bound, left, relation, right, "false");
				DBG_EVAL_CONFIRM(cmp);
				return tarval_b_false;
			}
		}
	}

	/* now, only right == Const can help */
	tv = value_of(right);
	if (tv != tarval_bad) {
		tv = compare_iv(
			get_interval(&l_iv, l_bound, l_relation),
			get_interval_from_tv(&r_iv, tv),
			relation);
	} else {
check_null_case:
		/* check some other cases */
		if ((relation == ir_relation_equal || relation == ir_relation_less_greater) && is_Const(right) && is_Const_null(right)) {
			/* for == 0 or != 0 we have some special tools */
			ir_mode *mode = get_irn_mode(left);
			const ir_node *dummy;
			if (mode_is_reference(mode)) {
				/* pointer comparison: prove non-null-ness of left */
				if (value_not_null(left, &dummy)) {
					tv = relation == ir_relation_equal ? tarval_b_false : tarval_b_true;
				}
			} else {
				/* integer comparison: prove non-zero-ness of left */
				if (value_not_zero(left, &dummy)) {
					tv = relation == ir_relation_equal ? tarval_b_false : tarval_b_true;
				}
			}
		}
	}

	if (tv != tarval_bad)
		DBG_EVAL_CONFIRM(cmp);

	return tv;
}
/**
 * Fill an x86 address mode structure for @p node, greedily consuming
 * immediates, Shl (as scaled index) and Add operands (as base/index).
 * Falls back to using the node itself as plain base register.
 */
void x86_create_address_mode(x86_address_t *addr, ir_node *node, x86_create_am_flags_t flags)
{
	addr->imm.kind = X86_IMM_VALUE;
	/* whole node representable as an immediate? */
	if (eat_immediate(addr, node, true)) {
		addr->variant = addr->ip_base ? X86_ADDR_RIP : X86_ADDR_JUST_IMM;
		return;
	}

	assert(!addr->ip_base);
	/* nodes marked as non-address-mode become a plain base register, unless
	 * forced, or double use is allowed and the node has few users */
	if (!(flags & x86_create_am_force) && x86_is_non_address_mode_node(node) && (!(flags & x86_create_am_double_use) || get_irn_n_edges(node) > 2)) {
		addr->variant = X86_ADDR_BASE;
		addr->base = node;
		return;
	}

	/* strip immediate-foldable parts off the node first */
	ir_node *eat_imms = eat_immediates(addr, node, flags, false);
	if (eat_imms != node) {
		if (flags & x86_create_am_force)
			eat_imms = be_skip_downconv(eat_imms, true);

		node = eat_imms;
		if (x86_is_non_address_mode_node(node)) {
			addr->variant = X86_ADDR_BASE;
			addr->base = node;
			return;
		}
	}

	/* starting point Add, Sub or Shl, FrameAddr */
	if (is_Shl(node)) {
		/* We don't want to eat add x, x as shl here, so only test for real Shl
		 * instructions, because we want the former as Lea x, x, not Shl x, 1 */
		if (eat_shl(addr, node)) {
			addr->variant = X86_ADDR_INDEX;
			return;
		}
	} else if (eat_immediate(addr, node, true)) {
		/* we can hit this case in x86_create_am_force mode */
		addr->variant = addr->ip_base ? X86_ADDR_RIP : X86_ADDR_JUST_IMM;
		return;
	} else if (is_Add(node)) {
		ir_node *left  = get_Add_left(node);
		ir_node *right = get_Add_right(node);

		if (flags & x86_create_am_force) {
			left  = be_skip_downconv(left, true);
			right = be_skip_downconv(right, true);
		}
		left  = eat_immediates(addr, left, flags, false);
		right = eat_immediates(addr, right, flags, false);

		/* try to consume one operand as a scaled index */
		if (eat_shl(addr, left)) {
			left = NULL;
		} else if (eat_shl(addr, right)) {
			right = NULL;
		}

		/* (x & 0xFFFFFFFC) + (x >> 2) -> lea(x >> 2, x >> 2, 4) */
		if (left != NULL && right != NULL) {
			ir_node *and;
			ir_node *shr;
			if (is_And(left) && (is_Shr(right) || is_Shrs(right))) {
				and = left;
				shr = right;
				goto tryit;
			}
			if (is_And(right) && (is_Shr(left) || is_Shrs(left))) {
				and = right;
				shr = left;
				/* note: also jumped to from the symmetric case above */
tryit:
				if (get_And_left(and) == get_binop_left(shr)) {
					ir_node *and_right = get_And_right(and);
					ir_node *shr_right = get_binop_right(shr);

					if (is_Const(and_right) && is_Const(shr_right)) {
						ir_tarval *and_mask     = get_Const_tarval(and_right);
						ir_tarval *shift_amount = get_Const_tarval(shr_right);
						ir_mode   *mode         = get_irn_mode(and);
						ir_tarval *all_one      = get_mode_all_one(mode);
						/* mask of the bits the shift+unshift would clear */
						ir_tarval *shift_mask   = tarval_shl(tarval_shr(all_one, shift_amount), shift_amount);
						long       val          = get_tarval_long(shift_amount);

						if (and_mask == shift_mask && val >= 0 && val <= 3) {
							addr->variant = X86_ADDR_BASE_INDEX;
							addr->base  = shr;
							addr->index = shr;
							addr->scale = val;
							return;
						}
					}
				}
			}
		}

		/* distribute the remaining operands over base and index */
		if (left != NULL) {
			ir_node *base = addr->base;
			if (base == NULL) {
				addr->variant = addr->index != NULL ? X86_ADDR_BASE_INDEX : X86_ADDR_BASE;
				addr->base = left;
			} else {
				addr->variant = X86_ADDR_BASE_INDEX;
				assert(addr->index == NULL && addr->scale == 0);
				assert(right == NULL);
				/* esp must be used as base */
				if (is_Proj(left) && is_Start(get_Proj_pred(left))) {
					addr->index = base;
					addr->base  = left;
				} else {
					addr->index = left;
				}
			}
		}
		if (right != NULL) {
			ir_node *base = addr->base;
			if (base == NULL) {
				addr->variant = addr->index != NULL ? X86_ADDR_BASE_INDEX : X86_ADDR_BASE;
				addr->base = right;
			} else {
				addr->variant = X86_ADDR_BASE_INDEX;
				assert(addr->index == NULL && addr->scale == 0);
				/* esp must be used as base */
				if (is_Proj(right) && is_Start(get_Proj_pred(right))) {
					addr->index = base;
					addr->base  = right;
				} else {
					addr->index = right;
				}
			}
		}
		return;
	}

	/* fallback: use the node itself as base register */
	addr->variant = X86_ADDR_BASE;
	addr->base = node;
}