/**
 * Transforms a Conv into the appropriate soft float function.
 */
static bool lower_Conv(ir_node *const n)
{
    dbg_info *const dbgi    = get_irn_dbg_info(n);
    ir_node  *const block   = get_nodes_block(n);
    ir_mode  *const mode    = get_irn_mode(n);
    ir_node        *op      = get_Conv_op(n);
    ir_mode        *op_mode = get_irn_mode(op);

    char const *name;
    if (!mode_is_float(mode)) {
        if (!mode_is_float(op_mode))
            return false;
        if (mode_is_signed(mode))
            name = "fix";
        else
            name = "fixuns";
    } else if (!mode_is_float(op_mode)) {
        ir_mode *min_mode;
        if (mode_is_signed(op_mode)) {
            name     = "float";
            min_mode = mode_Is;
        } else {
            name     = "floatun";
            min_mode = mode_Iu;
        }
        if (get_mode_size_bits(op_mode) < get_mode_size_bits(min_mode)) {
            op_mode = min_mode;
            op      = new_rd_Conv(dbgi, block, op, op_mode);
        }
    } else {
        /* Remove unnecessary Convs. */
        if (op_mode == mode) {
            exchange(n, op);
            return true;
        }
        if (get_mode_size_bits(op_mode) > get_mode_size_bits(mode))
            name = "trunc";
        else
            name = "extend";
    }

    ir_node *const in[]   = { op };
    ir_node       *result = make_softfloat_call(n, name, ARRAY_SIZE(in), in);

    /* Check whether we need a Conv for the result. */
    if (get_irn_mode(result) != mode)
        result = new_rd_Conv(dbgi, block, result, mode);

    exchange(n, result);
    return true;
}
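/* For illustration (not part of the original source): assuming the usual GCC
 * softfloat runtime naming and that make_softfloat_call appends
 * mode-dependent suffixes, the base names chosen above end up as calls such
 * as:
 *
 *   double -> int       "fix"     -> __fixdfsi
 *   double -> unsigned  "fixuns"  -> __fixunsdfsi
 *   int -> double       "float"   -> __floatsidf
 *   unsigned -> double  "floatun" -> __floatunsidf
 *   double -> float     "trunc"   -> __truncdfsf2
 *   float -> double     "extend"  -> __extendsfdf2
 */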
bool x86_match_immediate(x86_imm32_t *immediate, const ir_node *node,
                         char const constraint)
{
    ir_mode *const mode = get_irn_mode(node);
    if (get_mode_arithmetic(mode) != irma_twos_complement)
        return false;

    ir_tarval *offset;
    ir_entity *entity;
    if (!be_match_immediate(node, &offset, &entity))
        return false;

    long val = 0;
    if (offset) {
        if (!tarval_is_long(offset)) {
            be_warningf(node, "tarval is not long");
            return false;
        }

        val = get_tarval_long(offset);
        if (!check_immediate_constraint(val, constraint))
            return false;
    }

    if (entity != NULL) {
        /* we need full 32bits for entities */
        if (constraint != 'i' && constraint != 'g')
            return false;
    }

    /* we are fine */
    immediate->entity = entity;
    immediate->offset = (int32_t)val;
    return true;
}
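/* Background note (an assumption, not from the original comment): 'i' and
 * 'g' are the GCC inline-assembly constraints that permit an arbitrary 32bit
 * immediate, including symbolic addresses, which is why an entity reference
 * is only accepted for them. Narrower x86 constraints such as 'I' (0..31) or
 * 'N' (0..255) only make sense for plain numbers, which is presumably what
 * check_immediate_constraint enforces on val above. */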
void set_atomic_ent_value(ir_entity *entity, ir_node *val)
{
    assert(is_atomic_entity(entity));
    assert(is_Dummy(val) || get_irn_mode(val) == get_type_mode(entity->type));

    ir_initializer_t *initializer = create_initializer_const(val);
    entity->initializer = initializer;
}
/**
 * analyze enough to decide if we should lower the switch
 */
static void analyse_switch0(switch_info_t *info, ir_node *switchn)
{
    const ir_switch_table *table      = get_Switch_table(switchn);
    size_t                 n_entries  = ir_switch_table_get_n_entries(table);
    ir_mode               *mode       = get_irn_mode(get_Switch_selector(switchn));
    ir_tarval             *switch_min = get_mode_max(mode);
    ir_tarval             *switch_max = get_mode_min(mode);
    unsigned               num_cases  = 0;

    for (size_t e = 0; e < n_entries; ++e) {
        const ir_switch_table_entry *entry
            = ir_switch_table_get_entry_const(table, e);
        if (entry->pn == pn_Switch_default)
            continue;

        if (tarval_cmp(entry->min, switch_min) == ir_relation_less)
            switch_min = entry->min;
        if (tarval_cmp(entry->max, switch_max) == ir_relation_greater)
            switch_max = entry->max;

        ++num_cases;
    }

    info->switchn    = switchn;
    info->switch_min = switch_min;
    info->switch_max = switch_max;
    info->num_cases  = num_cases;
}
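/* Example (illustrative only): for a switch with case entries [3..5] and
 * [10..10] plus a default, the loop above yields switch_min = 3,
 * switch_max = 10 and num_cases = 2. Note that min/max start out swapped
 * (mode max/min), so the first real entry always tightens both bounds. */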
ir_node *be_transform_phi(ir_node *node, const arch_register_req_t *req)
{
    ir_node  *block = be_transform_nodes_block(node);
    ir_graph *irg   = get_irn_irg(block);
    dbg_info *dbgi  = get_irn_dbg_info(node);

    /* phi nodes allow loops, so we use the old arguments for now
     * and fix this later */
    ir_node **ins   = get_irn_in(node)+1;
    int       arity = get_irn_arity(node);
    ir_mode  *mode  = req->cls != NULL ? req->cls->mode : get_irn_mode(node);
    ir_node  *phi   = new_ir_node(dbgi, irg, block, op_Phi, mode, arity, ins);
    copy_node_attr(irg, node, phi);

    backend_info_t *info = be_get_info(phi);
    info->in_reqs = be_allocate_in_reqs(irg, arity);
    for (int i = 0; i < arity; ++i) {
        info->in_reqs[i] = req;
    }

    arch_set_irn_register_req_out(phi, 0, req);
    be_enqueue_preds(node);
    return phi;
}
ir_node *be_duplicate_node(ir_node *node)
{
    ir_node  *block = be_transform_node(get_nodes_block(node));
    ir_graph *irg   = env.irg;
    dbg_info *dbgi  = get_irn_dbg_info(node);
    ir_mode  *mode  = get_irn_mode(node);
    ir_op    *op    = get_irn_op(node);

    ir_node *new_node;
    int      arity = get_irn_arity(node);
    if (op->opar == oparity_dynamic) {
        new_node = new_ir_node(dbgi, irg, block, op, mode, -1, NULL);
        for (int i = 0; i < arity; ++i) {
            ir_node *in = get_irn_n(node, i);
            in = be_transform_node(in);
            add_irn_n(new_node, in);
        }
    } else {
        ir_node **ins = ALLOCAN(ir_node*, arity);
        for (int i = 0; i < arity; ++i) {
            ir_node *in = get_irn_n(node, i);
            ins[i] = be_transform_node(in);
        }

        new_node = new_ir_node(dbgi, irg, block, op, mode, arity, ins);
    }

    copy_node_attr(irg, node, new_node);
    be_duplicate_deps(node, new_node);

    new_node->node_nr = node->node_nr;
    return new_node;
}
ir_node *be_new_d_Copy(dbg_info *const dbgi, ir_node *const block,
                       ir_node *const op)
{
    ir_graph *const irg  = get_irn_irg(block);
    ir_node  *const in[] = { op };
    ir_node  *const res  = new_ir_node(dbgi, irg, block, op_be_Copy,
                                       get_irn_mode(op), ARRAY_SIZE(in), in);
    set_copy_info(res, irg, op, arch_irn_flags_none);
    return res;
}
ir_node *gcji_array_data_addr(ir_node *addr)
{
    ir_mode *mode        = get_irn_mode(addr);
    ir_mode *mode_offset = get_reference_offset_mode(mode);
    unsigned offset      = array_header_size;
    ir_node *offset_cnst = new_Const_long(mode_offset, offset);
    return new_Add(addr, offset_cnst);
}
/**
 * Copies a node to a new irg. The Ins of the new node point to
 * the predecessors on the old irg. n->link points to the new node.
 *
 * @param n    The node to be copied
 * @param irg  the new irg
 *
 * Does NOT copy standard nodes like Start, End etc. that are fixed
 * in an irg. Instead, the corresponding nodes of the new irg are returned.
 * Note further, that the new nodes have no block.
 */
static void copy_irn_to_irg(ir_node *n, ir_graph *irg)
{
    /* do not copy standard nodes */
    ir_node *nn = NULL;
    switch (get_irn_opcode(n)) {
    case iro_NoMem:
        nn = get_irg_no_mem(irg);
        break;

    case iro_Block: {
        ir_graph *old_irg = get_irn_irg(n);
        if (n == get_irg_start_block(old_irg))
            nn = get_irg_start_block(irg);
        else if (n == get_irg_end_block(old_irg))
            nn = get_irg_end_block(irg);
        break;
    }

    case iro_Start:
        nn = get_irg_start(irg);
        break;

    case iro_End:
        nn = get_irg_end(irg);
        break;

    case iro_Proj: {
        ir_graph *old_irg = get_irn_irg(n);
        if (n == get_irg_frame(old_irg))
            nn = get_irg_frame(irg);
        else if (n == get_irg_initial_mem(old_irg))
            nn = get_irg_initial_mem(irg);
        else if (n == get_irg_args(old_irg))
            nn = get_irg_args(irg);
        break;
    }
    }

    if (nn) {
        set_irn_link(n, nn);
        return;
    }

    nn = new_ir_node(get_irn_dbg_info(n),
                     irg,
                     NULL,            /* no block yet, will be set later */
                     get_irn_op(n),
                     get_irn_mode(n),
                     get_irn_arity(n),
                     get_irn_in(n));

    /* Copy the attributes. These might point to additional data. If this
       was allocated on the old obstack the pointers now are dangling. This
       frees e.g. the memory of the graph_arr allocated in new_immBlock. */
    copy_node_attr(irg, n, nn);
    set_irn_link(n, nn);
}
void be_set_phi_reg_req(ir_node *node, const arch_register_req_t *req)
{
    assert(mode_is_data(get_irn_mode(node)));
    backend_info_t *info = be_get_info(node);
    info->out_infos[0].req = req;
    for (int i = 0, arity = get_irn_arity(node); i < arity; ++i) {
        info->in_reqs[i] = req;
    }
}
ir_node *be_new_Copy(ir_node *bl, ir_node *op)
{
    ir_graph *irg  = get_irn_irg(bl);
    ir_node  *in[] = { op };
    ir_node  *res  = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op),
                                 ARRAY_SIZE(in), in);
    set_copy_info(res, irg, op, arch_irn_flags_none);
    return res;
}
static ir_node *transform_Proj_ASM(ir_node *const node)
{
    ir_node *const pred     = get_Proj_pred(node);
    ir_node *const new_pred = be_transform_node(pred);
    ir_mode *const mode     = get_irn_mode(node);
    unsigned const num      = mode == mode_M
                            ? arch_get_irn_n_outs(new_pred) - 1
                            : get_Proj_num(node);
    return be_new_Proj(new_pred, num);
}
static bool lower_Bitcast(ir_node *const n)
{
    /* The Bitcast was casting float->int or int->float; we can simply
     * replace it with a Conv (between integer modes) now. */
    ir_node *op       = get_Bitcast_op(n);
    ir_mode *src_mode = get_irn_mode(op);
    ir_mode *dst_mode = get_irn_mode(n);
    /* note that the predecessor may already be transformed, so it's
     * possible that we don't see a float mode anymore. */
    if (mode_is_float(dst_mode))
        dst_mode = get_lowered_mode(dst_mode);

    ir_node *res = op;
    if (src_mode != dst_mode) {
        dbg_info *dbgi  = get_irn_dbg_info(n);
        ir_node  *block = get_nodes_block(n);
        res = new_rd_Conv(dbgi, block, op, dst_mode);
    }

    exchange(n, res);
    return true;
}
/** patches Addresses to work in position independent code */
static void fix_pic_addresses(ir_node *const node, void *const data)
{
    (void)data;

    ir_graph      *const irg = get_irn_irg(node);
    be_main_env_t *const be  = be_get_irg_main_env(irg);
    foreach_irn_in(node, i, pred) {
        if (!is_Address(pred))
            continue;

        ir_node         *res;
        ir_entity *const entity = get_Address_entity(pred);
        dbg_info  *const dbgi   = get_irn_dbg_info(pred);
        if (i == n_Call_ptr && is_Call(node)) {
            /* Calls can jump to relative addresses, so we can directly jump
             * to the (relatively) known call address or the trampoline */
            if (can_address_relative(entity))
                continue;

            ir_entity *const trampoline = get_trampoline(be, entity);
            res = new_rd_Address(dbgi, irg, trampoline);
        } else if (get_entity_type(entity) == get_code_type()) {
            /* Block labels can always be addressed directly. */
            continue;
        } else {
            /* Everything else is accessed relative to EIP. */
            ir_node *const block    = get_nodes_block(pred);
            ir_mode *const mode     = get_irn_mode(pred);
            ir_node *const pic_base = ia32_get_pic_base(irg);

            if (can_address_relative(entity)) {
                /* All ok now for locally constructed stuff. */
                res = new_rd_Add(dbgi, block, pic_base, pred, mode);
                /* Make sure the walker doesn't visit this add again. */
                mark_irn_visited(res);
            } else {
                /* Get entry from pic symbol segment. */
                ir_entity *const pic_symbol  = get_pic_symbol(be, entity);
                ir_node   *const pic_address = new_rd_Address(dbgi, irg, pic_symbol);
                ir_node   *const add         = new_rd_Add(dbgi, block, pic_base, pic_address, mode);
                mark_irn_visited(add);

                /* We need an extra indirection for global data outside our
                 * current module. The loads are always safe and can
                 * therefore float and need no memory input */
                ir_type *const type  = get_entity_type(entity);
                ir_node *const nomem = get_irg_no_mem(irg);
                ir_node *const load  = new_rd_Load(dbgi, block, nomem, add,
                                                   mode, type, cons_floats);
                res = new_r_Proj(load, mode, pn_Load_res);
            }
        }
        set_irn_n(node, i, res);
    }
}
/**
 * Check if an argument of the ir graph with mode
 * reference is read, written or both.
 *
 * @param ent  The entity of the ir graph to analyze.
 */
static void analyze_ent_args(ir_entity *ent)
{
    ir_type *mtp     = get_entity_type(ent);
    size_t   nparams = get_method_n_params(mtp);

    ent->attr.mtd_attr.param_access = NEW_ARR_F(ptr_access_kind, nparams);

    /* If the method has no parameters we have nothing to do. */
    if (nparams <= 0)
        return;

    /* we have not yet analyzed the graph, set ALL access for pointer args */
    for (size_t i = nparams; i-- > 0; ) {
        ir_type *type = get_method_param_type(mtp, i);
        ent->attr.mtd_attr.param_access[i]
            = is_Pointer_type(type) ? ptr_access_all : ptr_access_none;
    }

    ir_graph *irg = get_entity_irg(ent);
    if (irg == NULL) {
        /* no graph, no better info */
        return;
    }

    assure_irg_outs(irg);
    ir_node *irg_args = get_irg_args(irg);

    /* An array to save the information for each argument with mode
     * reference. */
    ptr_access_kind *rw_info;
    NEW_ARR_A(ptr_access_kind, rw_info, nparams);

    /* We initialize the elements with the none state. */
    for (size_t i = nparams; i-- > 0; )
        rw_info[i] = ptr_access_none;

    /* search for arguments with mode reference to analyze them */
    for (int i = get_irn_n_outs(irg_args); i-- > 0; ) {
        ir_node *arg      = get_irn_out(irg_args, i);
        ir_mode *arg_mode = get_irn_mode(arg);
        long     proj_nr  = get_Proj_proj(arg);

        if (mode_is_reference(arg_mode))
            rw_info[proj_nr] |= analyze_arg(arg, rw_info[proj_nr]);
    }

    /* copy the temporary info */
    memcpy(ent->attr.mtd_attr.param_access, rw_info,
           nparams * sizeof(ent->attr.mtd_attr.param_access[0]));
}
/**
 * Transforms an Add into the appropriate soft float function.
 */
static bool lower_Add(ir_node *const n)
{
    ir_mode *const mode = get_irn_mode(n);
    if (!mode_is_float(mode))
        return false;

    ir_node *const left   = get_Add_left(n);
    ir_node *const right  = get_Add_right(n);
    ir_node *const in[]   = { left, right };
    ir_node *const result = make_softfloat_call(n, "add", ARRAY_SIZE(in), in);
    exchange(n, result);
    return true;
}
/**
 * Adapts the mode of the given node.
 */
static void lower_mode(ir_node *n, void *env)
{
    (void)env;
    ir_op                *op         = get_irn_op(n);
    lower_softfloat_func  lower_func = (lower_softfloat_func) op->ops.generic;
    ir_mode              *mode       = get_irn_mode(n);

    if (lower_func != NULL) {
        lower_func(n);
        return;
    }

    set_irn_mode(n, get_lowered_mode(mode));
}
static ir_node *arm_new_reload(ir_node *value, ir_node *spill,
                               ir_node *before)
{
    ir_node  *block = get_block(before);
    ir_graph *irg   = get_irn_irg(before);
    ir_node  *frame = get_irg_frame(irg);
    ir_mode  *mode  = get_irn_mode(value);
    ir_node  *load  = new_bd_arm_Ldr(NULL, block, frame, spill, mode, NULL,
                                     false, 0, true);
    ir_node  *proj  = new_r_Proj(load, mode, pn_arm_Ldr_res);
    arch_add_irn_flags(load, arch_irn_flag_reload);
    sched_add_before(before, load);
    return proj;
}
static ir_node *arm_new_spill(ir_node *value, ir_node *after)
{
    ir_node  *block = get_block(after);
    ir_graph *irg   = get_irn_irg(after);
    ir_node  *frame = get_irg_frame(irg);
    ir_node  *mem   = get_irg_no_mem(irg);
    ir_mode  *mode  = get_irn_mode(value);
    ir_node  *store = new_bd_arm_Str(NULL, block, frame, value, mem, mode,
                                     NULL, false, 0, true);
    arch_add_irn_flags(store, arch_irn_flag_spill);
    sched_add_after(after, store);
    return store;
}
/**
 * Adapts floating point constants.
 */
static bool lower_Const(ir_node *const n)
{
    ir_mode *mode = get_irn_mode(n);
    if (!mode_is_float(mode))
        return false;

    ir_tarval *float_tv     = get_Const_tarval(n);
    ir_mode   *lowered_mode = get_lowered_mode(mode);
    ir_tarval *int_tv       = tarval_bitcast(float_tv, lowered_mode);

    set_irn_mode(n, lowered_mode);
    set_Const_tarval(n, int_tv);
    return true;
}
/**
 * Adjust the size of a node representing a stack alloc to a certain
 * stack_alignment.
 *
 * @param size   the node containing the non-aligned size
 * @param block  the block where new nodes are allocated on
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(dbg_info *dbgi, ir_node *size,
                                  ir_node *block)
{
    /* Example: po2_alignment 4 (align to 16 bytes):
     * size = (size+15) & 0xfff...f0 */
    ir_mode   *mode    = get_irn_mode(size);
    ir_graph  *irg     = get_irn_irg(block);
    ir_tarval *allone  = get_mode_all_one(mode);
    ir_tarval *shr     = tarval_shr_unsigned(allone, po2_stack_alignment);
    ir_tarval *mask    = tarval_shl_unsigned(shr, po2_stack_alignment);
    ir_tarval *invmask = tarval_not(mask);
    ir_node   *addv    = new_r_Const(irg, invmask);
    ir_node   *add     = new_rd_Add(dbgi, block, size, addv);
    ir_node   *maskc   = new_r_Const(irg, mask);
    ir_node   *and     = new_rd_And(dbgi, block, add, maskc);
    return and;
}
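/* Worked example (illustrative, not part of the original source): with
 * po2_stack_alignment == 4 on a 32bit mode the tarvals above are
 *   shr     = 0x0fffffff
 *   mask    = 0xfffffff0
 *   invmask = 0x0000000f   (i.e. 15)
 * so a requested size of 20 becomes (20 + 15) & ~15 == 32. */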
ir_node *be_new_CopyKeep(ir_node *const bl, ir_node *const src, int const n,
                         ir_node *const *const in_keep)
{
    ir_mode  *mode  = get_irn_mode(src);
    ir_graph *irg   = get_irn_irg(bl);
    int       arity = n+1;
    ir_node **in    = ALLOCAN(ir_node*, arity);
    in[0] = src;
    MEMCPY(&in[1], in_keep, n);
    ir_node *irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, arity, in);
    set_copy_info(irn, irg, src, arch_irn_flag_schedule_first);
    for (int i = 0; i < n; ++i) {
        ir_node *pred = in_keep[i];
        arch_register_req_t const *const req = arch_get_irn_register_req(pred);
        be_node_set_register_req_in(irn, i + 1, req->cls->class_req);
    }
    return irn;
}
/**
 * Adjust the size of a node representing a stack alloc to a certain
 * stack_alignment.
 *
 * @param size   the node containing the non-aligned size
 * @param block  the block where new nodes are allocated on
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(dbg_info *dbgi, ir_node *size,
                                  ir_node *block)
{
    if (stack_alignment <= 1)
        return size;
    if (is_Const(size) && !lower_constant_sizes)
        return size;

    ir_mode   *mode = get_irn_mode(size);
    ir_tarval *tv   = new_tarval_from_long(stack_alignment-1, mode);
    ir_graph  *irg  = get_Block_irg(block);
    ir_node   *mask = new_r_Const(irg, tv);
    size = new_rd_Add(dbgi, block, size, mask, mode);

    tv   = new_tarval_from_long(-(long)stack_alignment, mode);
    mask = new_r_Const(irg, tv);
    size = new_rd_And(dbgi, block, size, mask, mode);
    return size;
}
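/* Worked example (illustrative): with stack_alignment == 8 this builds
 * (size + 7) & -8, so a requested size of 20 is rounded up to 24. In two's
 * complement -8 is the mask 0xff...f8, i.e. the computation assumes
 * stack_alignment is a power of two. */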
/**
 * lower 64bit addition: a 32bit add for the lower parts, an add with
 * carry for the higher parts. If the carry's value is known, fold it
 * into the upper add.
 */
static void ia32_lower_add64(ir_node *node, ir_mode *mode)
{
    dbg_info *dbg        = get_irn_dbg_info(node);
    ir_node  *block      = get_nodes_block(node);
    ir_node  *left       = get_Add_left(node);
    ir_node  *right      = get_Add_right(node);
    ir_node  *left_low   = get_lowered_low(left);
    ir_node  *left_high  = get_lowered_high(left);
    ir_node  *right_low  = get_lowered_low(right);
    ir_node  *right_high = get_lowered_high(right);
    ir_mode  *low_mode   = get_irn_mode(left_low);
    ir_mode  *high_mode  = get_irn_mode(left_high);
    carry_result cr      = lower_add_carry(left, right, low_mode);

    assert(get_irn_mode(left_low)  == get_irn_mode(right_low));
    assert(get_irn_mode(left_high) == get_irn_mode(right_high));

    if (cr == no_carry) {
        ir_node *add_low  = new_rd_Add(dbg, block, left_low,  right_low,  low_mode);
        ir_node *add_high = new_rd_Add(dbg, block, left_high, right_high, high_mode);
        ir_set_dw_lowered(node, add_low, add_high);
    } else if (cr == must_carry
            && (is_Const(left_high) || is_Const(right_high))) {
        // We cannot assume that left_high and right_high form a normalized Add.
        ir_node *constant;
        ir_node *other;

        if (is_Const(left_high)) {
            constant = left_high;
            other    = right_high;
        } else {
            constant = right_high;
            other    = left_high;
        }

        ir_graph *irg            = get_irn_irg(right_high);
        ir_node  *one            = new_rd_Const(dbg, irg, get_mode_one(high_mode));
        ir_node  *const_plus_one = new_rd_Add(dbg, block, constant, one, high_mode);
        ir_node  *add_high       = new_rd_Add(dbg, block, other, const_plus_one, high_mode);
        ir_node  *add_low        = new_rd_Add(dbg, block, left_low, right_low, low_mode);
        ir_set_dw_lowered(node, add_low, add_high);
    } else {
        /* l_res = a_l + b_l */
        ir_node *add_low    = new_bd_ia32_l_Add(dbg, block, left_low, right_low);
        ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
        ir_node *res_low    = new_r_Proj(add_low, ia32_mode_gp, pn_ia32_l_Add_res);
        ir_node *flags      = new_r_Proj(add_low, mode_flags, pn_ia32_l_Add_flags);

        /* h_res = a_h + b_h + carry */
        ir_node *add_high = new_bd_ia32_l_Adc(dbg, block, left_high,
                                              right_high, flags, mode);
        ir_set_dw_lowered(node, res_low, add_high);
    }
}
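/* Example of the must_carry folding above (illustrative numbers): adding
 * 0x00000001_ffffffff and 0x00000002_00000001, the low word addition always
 * carries, so no add-with-carry is needed; the high word becomes
 * other + (constant + 1) = 2 + (1 + 1) = 4, using two plain adds that the
 * constant folder can usually collapse further. */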
/*
 * Check if the value of a node cannot represent a NULL pointer.
 *
 * - Sels are skipped
 * - A SymConst(entity) is NEVER a NULL pointer
 * - Confirms are evaluated
 */
int value_not_null(const ir_node *n, const ir_node **confirm)
{
    *confirm = NULL;

    ir_tarval *tv = value_of(n);
    if (tarval_is_constant(tv) && !tarval_is_null(tv))
        return 1;

    assert(mode_is_reference(get_irn_mode(n)));
    /* skip all Sel nodes */
    while (is_Sel(n)) {
        n = get_Sel_ptr(n);
    }
    /* skip all Proj nodes */
    while (is_Proj(n)) {
        n = get_Proj_pred(n);
    }

    if (is_SymConst_addr_ent(n)) {
        /* global references are never NULL */
        return 1;
    } else if (n == get_irg_frame(get_irn_irg(n))) {
        /* local references are never NULL */
        return 1;
    } else if (is_Alloc(n)) {
        /* alloc never returns NULL (it throws an exception instead) */
        return 1;
    } else {
        /* check for more Confirms */
        for (; is_Confirm(n); n = get_Confirm_value(n)) {
            if (get_Confirm_relation(n) == ir_relation_less_greater) {
                ir_node   *bound = get_Confirm_bound(n);
                ir_tarval *tv    = value_of(bound);

                if (tarval_is_null(tv)) {
                    *confirm = n;
                    return 1;
                }
            }
        }
    }

    return 0;
}
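/* A minimal usage sketch (hypothetical caller, not from the original
 * source): a transformation asking whether a pointer is provably non-NULL
 * also learns which Confirm node, if any, the proof depends on: */
static int example_is_safe_deref(const ir_node *ptr)
{
    const ir_node *confirm;
    if (value_not_null(ptr, &confirm)) {
        /* ptr != NULL is proven; if confirm is non-NULL the proof rests on
         * that Confirm node and becomes invalid if it is removed. */
        return 1;
    }
    return 0;
}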
static void lower64_add(ir_node *node, ir_mode *mode)
{
    dbg_info *dbgi       = get_irn_dbg_info(node);
    ir_node  *block      = get_nodes_block(node);
    ir_node  *left       = get_Add_left(node);
    ir_node  *right      = get_Add_right(node);
    ir_node  *left_low   = get_lowered_low(left);
    ir_node  *left_high  = get_lowered_high(left);
    ir_node  *right_low  = get_lowered_low(right);
    ir_node  *right_high = get_lowered_high(right);
    ir_node  *adds       = new_bd_arm_AddS_t(dbgi, block, left_low, right_low);
    ir_mode  *mode_low   = get_irn_mode(left_low);
    ir_node  *res_low    = new_r_Proj(adds, mode_low, pn_arm_AddS_t_res);
    ir_node  *res_flags  = new_r_Proj(adds, mode_ANY, pn_arm_AddS_t_flags);
    ir_node  *adc        = new_bd_arm_AdC_t(dbgi, block, left_high, right_high,
                                            res_flags, mode);
    ir_set_dw_lowered(node, res_low, adc);
}
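/* For orientation (illustrative, actual instruction selection happens
 * later): the AddS/AdC pair corresponds to the classic two-instruction ARM
 * sequence
 *   ADDS r_low,  a_low,  b_low    @ add, setting the carry flag
 *   ADC  r_high, a_high, b_high   @ add with carry
 * and lower64_sub below mirrors it with SUBS/SBC for the borrow. */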
static void lower64_sub(ir_node *node, ir_mode *mode)
{
    dbg_info *dbgi       = get_irn_dbg_info(node);
    ir_node  *block      = get_nodes_block(node);
    ir_node  *left       = get_Sub_left(node);
    ir_node  *right      = get_Sub_right(node);
    ir_node  *left_low   = get_lowered_low(left);
    ir_node  *left_high  = get_lowered_high(left);
    ir_node  *right_low  = get_lowered_low(right);
    ir_node  *right_high = get_lowered_high(right);
    ir_node  *subs       = new_bd_arm_SubS_t(dbgi, block, left_low, right_low);
    ir_mode  *mode_low   = get_irn_mode(left_low);
    ir_node  *res_low    = new_r_Proj(subs, mode_low, pn_arm_SubS_t_res);
    ir_node  *res_flags  = new_r_Proj(subs, mode_ANY, pn_arm_SubS_t_flags);
    ir_node  *sbc        = new_bd_arm_SbC_t(dbgi, block, left_high, right_high,
                                            res_flags, mode);
    ir_set_dw_lowered(node, res_low, sbc);
}
/**
 * Turn a small CopyB node into a series of Load/Store nodes.
 */
static void lower_small_copyb_node(ir_node *irn)
{
    ir_graph *irg        = get_irn_irg(irn);
    dbg_info *dbgi       = get_irn_dbg_info(irn);
    ir_node  *block      = get_nodes_block(irn);
    ir_type  *tp         = get_CopyB_type(irn);
    ir_node  *addr_src   = get_CopyB_src(irn);
    ir_node  *addr_dst   = get_CopyB_dst(irn);
    ir_node  *mem        = get_CopyB_mem(irn);
    ir_mode  *mode_ref   = get_irn_mode(addr_src);
    unsigned  mode_bytes = allow_misalignments ? native_mode_bytes
                                               : get_type_alignment(tp);
    unsigned  size       = get_type_size(tp);
    unsigned  offset     = 0;
    bool      is_volatile
        = get_CopyB_volatility(irn) == volatility_is_volatile;
    ir_cons_flags flags  = is_volatile ? cons_volatile : cons_none;

    while (offset < size) {
        ir_mode *mode = get_ir_mode(mode_bytes);
        for (; offset + mode_bytes <= size; offset += mode_bytes) {
            ir_mode *mode_ref_int = get_reference_offset_mode(mode_ref);

            /* construct offset */
            ir_node *addr_const = new_r_Const_long(irg, mode_ref_int, offset);
            ir_node *add        = new_r_Add(block, addr_src, addr_const);

            ir_node *load     = new_rd_Load(dbgi, block, mem, add, mode, tp,
                                            flags);
            ir_node *load_res = new_r_Proj(load, mode, pn_Load_res);
            ir_node *load_mem = new_r_Proj(load, mode_M, pn_Load_M);

            ir_node *addr_const2 = new_r_Const_long(irg, mode_ref_int, offset);
            ir_node *add2        = new_r_Add(block, addr_dst, addr_const2);

            ir_node *store     = new_rd_Store(dbgi, block, load_mem, add2,
                                              load_res, tp, flags);
            ir_node *store_mem = new_r_Proj(store, mode_M, pn_Store_M);

            mem = store_mem;
        }
        mode_bytes /= 2;
    }

    exchange(irn, mem);
}
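/* Example decomposition (illustrative): copying a 15 byte type with
 * mode_bytes starting at 4 produces three 4 byte Load/Store pairs at offsets
 * 0, 4 and 8, then one 2 byte pair at offset 12 and one 1 byte pair at
 * offset 14, with all memory dependencies chained through `mem`. */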
/**
 * lower 64bit subtraction: a 32bit sub for the lower parts, a sub
 * with borrow for the higher parts. If the borrow's value is known,
 * fold it into the upper sub.
 */
static void ia32_lower_sub64(ir_node *node, ir_mode *mode)
{
    dbg_info *dbg        = get_irn_dbg_info(node);
    ir_node  *block      = get_nodes_block(node);
    ir_node  *left       = get_Sub_left(node);
    ir_node  *right      = get_Sub_right(node);
    ir_node  *left_low   = get_lowered_low(left);
    ir_node  *left_high  = get_lowered_high(left);
    ir_node  *right_low  = get_lowered_low(right);
    ir_node  *right_high = get_lowered_high(right);
    ir_mode  *low_mode   = get_irn_mode(left_low);
    ir_mode  *high_mode  = get_irn_mode(left_high);
    carry_result cr      = lower_sub_borrow(left, right, low_mode);

    assert(get_irn_mode(left_low)  == get_irn_mode(right_low));
    assert(get_irn_mode(left_high) == get_irn_mode(right_high));

    if (cr == no_carry) {
        ir_node *sub_low  = new_rd_Sub(dbg, block, left_low,  right_low,  low_mode);
        ir_node *sub_high = new_rd_Sub(dbg, block, left_high, right_high, high_mode);
        ir_set_dw_lowered(node, sub_low, sub_high);
    } else if (cr == must_carry
            && (is_Const(left_high) || is_Const(right_high))) {
        ir_node  *sub_high;
        ir_graph *irg = get_irn_irg(right_high);
        ir_node  *one = new_rd_Const(dbg, irg, get_mode_one(high_mode));

        if (is_Const(right_high)) {
            ir_node *new_const = new_rd_Add(dbg, block, right_high, one, high_mode);
            sub_high = new_rd_Sub(dbg, block, left_high, new_const, high_mode);
        } else if (is_Const(left_high)) {
            ir_node *new_const = new_rd_Sub(dbg, block, left_high, one, high_mode);
            sub_high = new_rd_Sub(dbg, block, new_const, right_high, high_mode);
        } else {
            panic("logic error");
        }

        ir_node *sub_low = new_rd_Sub(dbg, block, left_low, right_low, low_mode);
        ir_set_dw_lowered(node, sub_low, sub_high);
    } else {
        /* l_res = a_l - b_l */
        ir_node *sub_low    = new_bd_ia32_l_Sub(dbg, block, left_low, right_low);
        ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
        ir_node *res_low    = new_r_Proj(sub_low, ia32_mode_gp, pn_ia32_l_Sub_res);
        ir_node *flags      = new_r_Proj(sub_low, mode_flags, pn_ia32_l_Sub_flags);

        /* h_res = a_h - b_h - carry */
        ir_node *sub_high = new_bd_ia32_l_Sbb(dbg, block, left_high,
                                              right_high, flags, mode);
        ir_set_dw_lowered(node, res_low, sub_high);
    }
}
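/* Example of the must_carry folding above (illustrative numbers): for
 * 0x00000002_00000000 - 0x00000001_00000001 the low word subtraction always
 * borrows, so with right_high constant the high word is computed borrow-free
 * as left_high - (right_high + 1) = 2 - (1 + 1) = 0. */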
static void lower64_minus(ir_node *node, ir_mode *mode)
{
    dbg_info *dbgi         = get_irn_dbg_info(node);
    ir_graph *irg          = get_irn_irg(node);
    ir_node  *block        = get_nodes_block(node);
    ir_node  *op           = get_Minus_op(node);
    ir_node  *right_low    = get_lowered_low(op);
    ir_node  *right_high   = get_lowered_high(op);
    ir_mode  *low_unsigned = get_irn_mode(right_low);
    ir_node  *left_low     = new_r_Const_null(irg, low_unsigned);
    ir_node  *left_high    = new_r_Const_null(irg, mode);
    ir_node  *subs         = new_bd_arm_SubS_t(dbgi, block, left_low, right_low);
    ir_node  *res_low      = new_r_Proj(subs, low_unsigned, pn_arm_SubS_t_res);
    ir_node  *res_flags    = new_r_Proj(subs, mode_ANY, pn_arm_SubS_t_flags);
    ir_node  *sbc          = new_bd_arm_SbC_t(dbgi, block, left_high,
                                              right_high, res_flags, mode);
    ir_set_dw_lowered(node, res_low, sbc);
}
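/* Note (illustrative): the double-word negation is expressed as the
 * subtraction 0 - op, reusing the SubS/SbC borrow chain from lower64_sub:
 * the low word is 0 - op_low (setting the borrow flag) and the high word is
 * 0 - op_high - borrow. */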