/**
 * Checks whether a node is an arg, i.e. a Proj of a Proj of the Start node.
 */
static int is_arg(ir_node *node)
{
    if (!is_Proj(node))
        return 0;
    node = get_Proj_pred(node);
    if (!is_Proj(node))
        return 0;
    node = get_Proj_pred(node);
    return is_Start(node);
}
/**
 * Lower Alloc nodes to allocate "bytes" instead of a certain type.
 */
static void lower_alloca_free(ir_node *node, void *data)
{
    (void)data;
    if (is_Alloc(node)) {
        /* fall through to the size/alignment adjustment below */
    } else if (is_Proj(node)) {
        ir_node *proj_pred = get_Proj_pred(node);
        if (is_Alloc(proj_pred)) {
            transform_Proj_Alloc(node);
        }
        return;
    } else {
        return;
    }
    if (!ir_nodeset_insert(&transformed, node))
        return;

    if (stack_alignment <= 1)
        return;
    ir_node  *const size     = get_Alloc_size(node);
    ir_node  *const mem      = get_Alloc_mem(node);
    ir_node  *const block    = get_nodes_block(node);
    dbg_info *const dbgi     = get_irn_dbg_info(node);
    ir_node  *const new_size = adjust_alloc_size(dbgi, size, block);
    ir_node  *const new_node = new_rd_Alloc(dbgi, block, mem, new_size, 1);
    ir_nodeset_insert(&transformed, new_node);

    if (new_node != node)
        exchange(node, new_node);
}
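/* Hedged usage sketch: how the walker above might be driven over a single
 * graph.  "transformed" is the file-scope node set used above; the wrapper
 * name lower_allocas_in_irg is illustrative, not taken from the source. */
static void lower_allocas_in_irg(ir_graph *irg)
{
    ir_nodeset_init(&transformed);
    irg_walk_graph(irg, lower_alloca_free, NULL, NULL);
    ir_nodeset_destroy(&transformed);
}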
/**
 * Place operands of node into an address mode.
 *
 * @param addr   the address mode data so far
 * @param node   the node
 * @param flags  the flags
 *
 * @return the folded node
 */
static ir_node *eat_immediates(x86_address_t *addr, ir_node *node,
                               x86_create_am_flags_t flags,
                               bool basereg_usable)
{
    if (!(flags & x86_create_am_force)
        && x86_is_non_address_mode_node(node)
        && (!(flags & x86_create_am_double_use) || get_irn_n_edges(node) > 2))
        return node;

    if (is_Add(node)) {
        ir_node *left  = get_Add_left(node);
        ir_node *right = get_Add_right(node);
        if (eat_immediate(addr, left, basereg_usable))
            return eat_immediates(addr, right, x86_create_am_normal,
                                  basereg_usable);
        if (eat_immediate(addr, right, basereg_usable))
            return eat_immediates(addr, left, x86_create_am_normal,
                                  basereg_usable);
    } else if (is_Member(node)) {
        assert(addr->imm.entity == NULL);
        addr->imm.entity = get_Member_entity(node);
        addr->imm.kind   = X86_IMM_FRAMEENT;
        ir_node *ptr = get_Member_ptr(node);
        assert(is_Start(get_Proj_pred(ptr)));
        return ptr;
    }
    return node;
}
/**
 * Transforms:
 *     a
 *     |
 *   Tuple
 *     |      =>
 *   Proj x        a
 */
static void exchange_tuple_projs(ir_node *node, void *env)
{
    bool *changed = (bool*)env;

    if (!is_Proj(node))
        return;

    /* Handle Tuple(Tuple,...) case. */
    exchange_tuple_projs(get_Proj_pred(node), env);

    ir_node *pred = get_Proj_pred(node);
    if (!is_Tuple(pred))
        return;

    unsigned  pn         = get_Proj_num(node);
    ir_node  *tuple_pred = get_Tuple_pred(pred, pn);
    exchange(node, tuple_pred);
    *changed = true;
}
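/* Hedged usage sketch: running the Tuple/Proj cleanup over one graph and
 * reporting whether anything changed.  The driver name is illustrative, and
 * registering the walker as a pre-order callback is an assumption. */
static bool remove_tuple_projs(ir_graph *irg)
{
    bool changed = false;
    irg_walk_graph(irg, exchange_tuple_projs, NULL, &changed);
    return changed;
}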
static ir_node *transform_proj(ir_node *node)
{
    ir_node           *pred           = get_Proj_pred(node);
    ir_op             *pred_op        = get_irn_op(pred);
    be_transform_func *proj_transform
        = (be_transform_func*)pred_op->ops.generic1;
    /* we should have a Proj transformer registered */
    assert(proj_transform != NULL);
    return proj_transform(node);
}
static ir_node *transform_Proj_ASM(ir_node *const node)
{
    ir_node *const pred     = get_Proj_pred(node);
    ir_node *const new_pred = be_transform_node(pred);
    ir_mode *const mode     = get_irn_mode(node);
    unsigned const num      = mode == mode_M
                            ? arch_get_irn_n_outs(new_pred) - 1
                            : get_Proj_num(node);
    return be_new_Proj(new_pred, num);
}
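/* Hedged sketch of the registration side assumed by the Proj transformers in
 * this section: a per-op Proj transformer is stored in the op's generic1 slot
 * so that transform_proj() below can dispatch on the Proj's predecessor.  The
 * helper name be_set_transform_proj_function follows libfirm's betranshlp; if
 * the surrounding code registers transformers differently, treat this as
 * illustrative only. */
static void register_asm_proj_transformer(void)
{
    be_set_transform_proj_function(op_ASM, transform_Proj_ASM);
}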
static ir_node *transform_proj(ir_node *node)
{
    ir_node           *pred           = get_Proj_pred(node);
    ir_op             *pred_op        = get_irn_op(pred);
    be_transform_func *proj_transform
        = (be_transform_func*)pred_op->ops.generic1;
    /* we should have a Proj transformer registered */
#ifdef DEBUG_libfirm
    if (!proj_transform) {
        unsigned const node_pn = get_Proj_num(node);
        if (is_Proj(pred)) {
            unsigned const pred_pn   = get_Proj_num(pred);
            ir_node *const pred_pred = get_Proj_pred(pred);
            panic("no transformer for %+F (%u) -> %+F (%u) -> %+F",
                  node, node_pn, pred, pred_pn, pred_pred);
        } else {
            panic("no transformer for %+F (%u) -> %+F", node, node_pn, pred);
        }
    }
#endif
    return proj_transform(node);
}
/*
 * Check whether the value of a node cannot represent a NULL pointer.
 *
 * - Sel and Proj nodes are skipped
 * - a SymConst(entity) is NEVER a NULL pointer
 * - Confirms are evaluated
 */
int value_not_null(const ir_node *n, const ir_node **confirm)
{
    ir_tarval *tv;

    *confirm = NULL;
    tv = value_of(n);
    if (tarval_is_constant(tv) && !tarval_is_null(tv))
        return 1;

    assert(mode_is_reference(get_irn_mode(n)));
    /* skip all Sel nodes */
    while (is_Sel(n)) {
        n = get_Sel_ptr(n);
    }
    /* skip all Proj nodes */
    while (is_Proj(n)) {
        n = get_Proj_pred(n);
    }
    if (is_SymConst_addr_ent(n)) {
        /* global references are never NULL */
        return 1;
    } else if (n == get_irg_frame(get_irn_irg(n))) {
        /* local references are never NULL */
        return 1;
    } else if (is_Alloc(n)) {
        /* alloc never returns NULL (it throws an exception instead) */
        return 1;
    } else {
        /* check for more Confirms */
        for (; is_Confirm(n); n = get_Confirm_value(n)) {
            if (get_Confirm_relation(n) == ir_relation_less_greater) {
                ir_node   *bound = get_Confirm_bound(n);
                ir_tarval *tv    = value_of(bound);
                if (tarval_is_null(tv)) {
                    *confirm = n;
                    return 1;
                }
            }
        }
    }
    return 0;
}
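/* Hedged usage sketch for value_not_null(): the confirm out-parameter names
 * the Confirm node that justified a positive answer, so a caller can record
 * that dependency.  The caller name is illustrative. */
static int ptr_is_non_null(const ir_node *ptr)
{
    const ir_node *confirm;
    int const      res = value_not_null(ptr, &confirm);
    if (res && confirm != NULL) {
        /* the answer relies on a Confirm node; record the dependency here */
    }
    return res;
}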
static void transform_Proj_Alloc(ir_node *node)
{
    /* we might need a result adjustment */
    if (addr_delta == 0)
        return;
    if (get_Proj_proj(node) != pn_Alloc_res)
        return;
    if (ir_nodeset_contains(&transformed, node))
        return;

    ir_node  *const alloc = get_Proj_pred(node);
    dbg_info *const dbgi  = get_irn_dbg_info(alloc);
    ir_graph *const irg   = get_irn_irg(node);
    ir_node  *const block = get_nodes_block(node);
    ir_node  *const delta = new_r_Const_long(irg, mode_P, addr_delta);
    ir_node  *const dummy = new_r_Dummy(irg, mode_P);
    ir_node  *const add   = new_rd_Add(dbgi, block, dummy, delta, mode_P);

    exchange(node, add);
    ir_node *const new_proj = new_r_Proj(alloc, mode_P, pn_Alloc_res);
    set_Add_left(add, new_proj);
    ir_nodeset_insert(&transformed, new_proj);
}
static void infer_typeinfo_walker(ir_node *irn, void *env)
{
    bool *changed = (bool*)env;

    // A node's type only needs to be calculated once.
    if (get_irn_typeinfo_type(irn) != initial_type)
        return;

    if (is_Alloc(irn)) {
        // This one is easy: we know the exact dynamic type.
        ir_type *type = get_Alloc_type(irn);
        if (!is_Class_type(type))
            return;
        set_irn_typeinfo_type(irn, type);
        *changed = true;
    } else if (is_Sel(irn) || is_SymConst_addr_ent(irn)) {
        // The type we determine here is the one of the entity we select or reference.
        // The transform_Sel method below will use the type incoming on the Sel_ptr input.
        ir_type *type = get_Sel_or_SymConst_type(irn);
        if (!type)
            return;
        ir_type *one_alive = get_alive_subclass(type);
        if (!one_alive)
            return;
        set_irn_typeinfo_type(irn, one_alive);
        *changed = true;
    } else if (is_Call(irn)) {
        // The dynamic type of the call result is the return type of the called entity.
        ir_node *call_pred = get_Call_ptr(irn);
        ir_type *pred_type = get_irn_typeinfo_type(call_pred);
        if (pred_type == initial_type)
            return;
        set_irn_typeinfo_type(irn, pred_type);
        *changed = true;
    } else if (is_Load(irn)) {
        // The dynamic type of the Load result is the type of the loaded entity.
        ir_node *load_pred = get_Load_ptr(irn);
        if (!is_Sel(load_pred) && !is_SymConst_addr_ent(load_pred))
            return;
        ir_type *pred_type = get_irn_typeinfo_type(load_pred);
        if (pred_type == initial_type)
            return;
        set_irn_typeinfo_type(irn, pred_type);
        *changed = true;
    } else if (is_Proj(irn)) {
        // Types have to be propagated through Proj nodes (XXX: and also through Cast and Confirm).
        ir_mode *pmode = get_irn_mode(irn);
        if (pmode != mode_P)
            return;
        ir_node *proj_pred = get_Proj_pred(irn);
        if (is_Proj(proj_pred) && get_irn_mode(proj_pred) == mode_T
            && get_Proj_proj(proj_pred) == pn_Call_T_result
            && is_Call(get_Proj_pred(proj_pred)))
            proj_pred = get_Proj_pred(proj_pred); // skip the result tuple
        ir_type *pred_type = get_irn_typeinfo_type(proj_pred);
        if (pred_type == initial_type)
            return;
        set_irn_typeinfo_type(irn, pred_type);
        *changed = true;
    } else if (is_Phi(irn)) {
        // Phi nodes are a special case because the incoming type information must be merged.
        // A Phi node's type is unknown until all inputs are known to be the same dynamic type.
        ir_mode *pmode = get_irn_mode(irn);
        if (pmode != mode_P)
            return;
        int      phi_preds = get_Phi_n_preds(irn);
        ir_type *last      = NULL;
        for (int p = 0; p < phi_preds; p++) {
            ir_node *pred      = get_Phi_pred(irn, p);
            ir_type *pred_type = get_irn_typeinfo_type(pred);
            if (pred_type == initial_type)
                return;
            if (p && last != pred_type)
                return;
            last = pred_type;
        }
        set_irn_typeinfo_type(irn, last);
        *changed = true;
    }
}
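/* Hedged driver sketch: the walker above assigns each node's type only once,
 * so it is assumed to be run to a fixed point per graph, repeating while any
 * pass assigned new type information.  The driver name is illustrative; the
 * typeinfo mode is assumed to be initialised elsewhere. */
static void infer_typeinfo(ir_graph *irg)
{
    bool changed;
    do {
        changed = false;
        irg_walk_graph(irg, NULL, infer_typeinfo_walker, &changed);
    } while (changed);
}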
void x86_create_address_mode(x86_address_t *addr, ir_node *node,
                             x86_create_am_flags_t flags)
{
    addr->imm.kind = X86_IMM_VALUE;
    if (eat_immediate(addr, node, true)) {
        addr->variant = addr->ip_base ? X86_ADDR_RIP : X86_ADDR_JUST_IMM;
        return;
    }

    assert(!addr->ip_base);
    if (!(flags & x86_create_am_force)
        && x86_is_non_address_mode_node(node)
        && (!(flags & x86_create_am_double_use) || get_irn_n_edges(node) > 2)) {
        addr->variant = X86_ADDR_BASE;
        addr->base    = node;
        return;
    }

    ir_node *eat_imms = eat_immediates(addr, node, flags, false);
    if (eat_imms != node) {
        if (flags & x86_create_am_force)
            eat_imms = be_skip_downconv(eat_imms, true);

        node = eat_imms;
        if (x86_is_non_address_mode_node(node)) {
            addr->variant = X86_ADDR_BASE;
            addr->base    = node;
            return;
        }
    }

    /* starting point Add, Sub or Shl, FrameAddr */
    if (is_Shl(node)) {
        /* We don't want to eat add x, x as shl here, so only test for real Shl
         * instructions, because we want the former as Lea x, x, not Shl x, 1 */
        if (eat_shl(addr, node)) {
            addr->variant = X86_ADDR_INDEX;
            return;
        }
    } else if (eat_immediate(addr, node, true)) {
        /* we can hit this case in x86_create_am_force mode */
        addr->variant = addr->ip_base ? X86_ADDR_RIP : X86_ADDR_JUST_IMM;
        return;
    } else if (is_Add(node)) {
        ir_node *left  = get_Add_left(node);
        ir_node *right = get_Add_right(node);

        if (flags & x86_create_am_force) {
            left  = be_skip_downconv(left, true);
            right = be_skip_downconv(right, true);
        }
        left  = eat_immediates(addr, left, flags, false);
        right = eat_immediates(addr, right, flags, false);

        if (eat_shl(addr, left)) {
            left = NULL;
        } else if (eat_shl(addr, right)) {
            right = NULL;
        }

        /* (x & 0xFFFFFFFC) + (x >> 2) -> lea(x >> 2, x >> 2, 4) */
        if (left != NULL && right != NULL) {
            ir_node *and;
            ir_node *shr;
            if (is_And(left) && (is_Shr(right) || is_Shrs(right))) {
                and = left;
                shr = right;
                goto tryit;
            }
            if (is_And(right) && (is_Shr(left) || is_Shrs(left))) {
                and = right;
                shr = left;
tryit:
                if (get_And_left(and) == get_binop_left(shr)) {
                    ir_node *and_right = get_And_right(and);
                    ir_node *shr_right = get_binop_right(shr);

                    if (is_Const(and_right) && is_Const(shr_right)) {
                        ir_tarval *and_mask     = get_Const_tarval(and_right);
                        ir_tarval *shift_amount = get_Const_tarval(shr_right);
                        ir_mode   *mode         = get_irn_mode(and);
                        ir_tarval *all_one      = get_mode_all_one(mode);
                        ir_tarval *shift_mask   = tarval_shl(tarval_shr(all_one, shift_amount),
                                                             shift_amount);
                        long       val          = get_tarval_long(shift_amount);

                        if (and_mask == shift_mask && val >= 0 && val <= 3) {
                            addr->variant = X86_ADDR_BASE_INDEX;
                            addr->base    = shr;
                            addr->index   = shr;
                            addr->scale   = val;
                            return;
                        }
                    }
                }
            }
        }

        if (left != NULL) {
            ir_node *base = addr->base;
            if (base == NULL) {
                addr->variant = addr->index != NULL ? X86_ADDR_BASE_INDEX
                                                    : X86_ADDR_BASE;
                addr->base    = left;
            } else {
                addr->variant = X86_ADDR_BASE_INDEX;
                assert(addr->index == NULL && addr->scale == 0);
                assert(right == NULL);
                /* esp must be used as base */
                if (is_Proj(left) && is_Start(get_Proj_pred(left))) {
                    addr->index = base;
                    addr->base  = left;
                } else {
                    addr->index = left;
                }
            }
        }
        if (right != NULL) {
            ir_node *base = addr->base;
            if (base == NULL) {
                addr->variant = addr->index != NULL ? X86_ADDR_BASE_INDEX
                                                    : X86_ADDR_BASE;
                addr->base    = right;
            } else {
                addr->variant = X86_ADDR_BASE_INDEX;
                assert(addr->index == NULL && addr->scale == 0);
                /* esp must be used as base */
                if (is_Proj(right) && is_Start(get_Proj_pred(right))) {
                    addr->index = base;
                    addr->base  = right;
                } else {
                    addr->index = right;
                }
            }
        }
        return;
    }

    addr->variant = X86_ADDR_BASE;
    addr->base    = node;
}
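/* Hedged usage sketch: how a caller might fold a pointer computation into an
 * x86 address mode.  Zero-initialising the x86_address_t before the call and
 * the wrapper name build_address are assumptions; only x86_create_address_mode()
 * and the x86_create_am_normal flag appear in the code above. */
static void build_address(x86_address_t *const addr, ir_node *const ptr)
{
    *addr = (x86_address_t){0};  /* start from an empty address mode */
    x86_create_address_mode(addr, ptr, x86_create_am_normal);
}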