static ir_node *convert_to_modeb(ir_node *node)
{
	ir_node  *block = get_nodes_block(node);
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *zero  = new_r_Const_null(irg, lowered_mode);
	ir_node  *cmp   = new_r_Cmp(block, node, zero, ir_relation_less_greater);
	return cmp;
}
ir_node *be_new_d_Copy(dbg_info *const dbgi, ir_node *const block, ir_node *const op)
{
	ir_graph *const irg  = get_irn_irg(block);
	ir_node  *const in[] = { op };
	ir_node  *const res  = new_ir_node(dbgi, irg, block, op_be_Copy, get_irn_mode(op), ARRAY_SIZE(in), in);
	set_copy_info(res, irg, op, arch_irn_flags_none);
	return res;
}
int block_postdominates(const ir_node *a, const ir_node *b)
{
	assert(irg_has_properties(get_irn_irg(a), IR_GRAPH_PROPERTY_CONSISTENT_POSTDOMINANCE));
	const ir_dom_info *ai = get_pdom_info_const(a);
	const ir_dom_info *bi = get_pdom_info_const(b);
	return bi->tree_pre_num - ai->tree_pre_num
	    <= ai->max_subtree_pre_num - ai->tree_pre_num;
}
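The return expression hides a neat trick: in a preorder numbering of the postdominator tree, b lies in a's subtree exactly when ai->tree_pre_num <= bi->tree_pre_num <= ai->max_subtree_pre_num, and with unsigned numbers both bounds collapse into a single comparison. A minimal self-contained sketch of the same interval test, assuming (as the ir_dom_info fields suggest) that the preorder numbers are unsigned; the dom_range/in_subtree names are made up for illustration:

#include <assert.h>

/* Stand-in for libfirm's dominator numbering: each node gets a preorder
 * number plus the largest preorder number occurring in its subtree. */
typedef struct {
	unsigned tree_pre_num;
	unsigned max_subtree_pre_num;
} dom_range;

/* b lies in a's subtree iff a.tree_pre_num <= b_pre_num <= a.max_subtree_pre_num.
 * Because the numbers are unsigned, "b - lo <= hi - lo" checks both bounds at
 * once: if b_pre_num < a.tree_pre_num the subtraction wraps around to a huge
 * value and the comparison fails. */
static int in_subtree(dom_range a, unsigned b_pre_num)
{
	return b_pre_num - a.tree_pre_num
	    <= a.max_subtree_pre_num - a.tree_pre_num;
}

int main(void)
{
	dom_range a = { 3, 7 };    /* subtree covers preorder numbers 3..7 */
	assert(in_subtree(a, 5));  /* inside the interval */
	assert(!in_subtree(a, 2)); /* below: wraps around, fails */
	assert(!in_subtree(a, 8)); /* above: fails the upper bound */
	return 0;
}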
static ir_node *create_fpu_mode_reload(void *const env, ir_node *const state,
                                       ir_node *const spill,
                                       ir_node *const before,
                                       ir_node *const last_state)
{
	(void)env;
	(void)state;
	ir_node        *reload;
	ir_node  *const block = get_nodes_block(before);
	ir_graph *const irg   = get_irn_irg(block);
	ir_node  *const noreg = ia32_new_NoReg_gp(irg);
	ir_node  *const nomem = get_irg_no_mem(irg);
	if (ia32_cg_config.use_unsafe_floatconv) {
		reload = new_bd_ia32_FldCW(NULL, block, noreg, noreg, nomem);
		/* A spill restores the default (round-to-nearest) control word,
		 * otherwise we switch to truncation. */
		ir_entity *const rounding_mode = spill
			? create_ent(&fpcw_round,    0x37F, "_fpcw_round")
			: create_ent(&fpcw_truncate, 0xC7F, "_fpcw_truncate");
		set_ia32_am_ent(reload, rounding_mode);
	} else {
		ir_node       *mem;
		ir_node *const frame = get_irg_frame(irg);
		if (spill) {
			mem = spill;
		} else {
			assert(last_state);
			ir_node *const cwstore = create_fnstcw(block, frame, noreg, nomem, last_state);
			sched_add_before(before, cwstore);

			ir_node *const load = new_bd_ia32_Load(NULL, block, frame, noreg, cwstore);
			set_ia32_op_type(load, ia32_AddrModeS);
			set_ia32_ls_mode(load, mode_Hu);
			set_ia32_frame_use(load, IA32_FRAME_USE_32BIT);
			sched_add_before(before, load);

			ir_node *const load_res = new_r_Proj(load, ia32_mode_gp, pn_ia32_Load_res);

			/* TODO: Make the actual mode configurable in ChangeCW. */
			ir_node *const or_const = ia32_create_Immediate(irg, 0xC00);
			ir_node *const orn      = new_bd_ia32_Or(NULL, block, noreg, noreg, nomem, load_res, or_const);
			sched_add_before(before, orn);

			ir_node *const store = new_bd_ia32_Store(NULL, block, frame, noreg, nomem, orn);
			set_ia32_op_type(store, ia32_AddrModeD);
			/* Use ia32_mode_gp, as movl has a shorter opcode than movw. */
			set_ia32_ls_mode(store, ia32_mode_gp);
			set_ia32_frame_use(store, IA32_FRAME_USE_32BIT);
			sched_add_before(before, store);

			mem = new_r_Proj(store, mode_M, pn_ia32_Store_M);
		}
		reload = new_bd_ia32_FldCW(NULL, block, frame, noreg, mem);
	}
	set_ia32_op_type(reload, ia32_AddrModeS);
	set_ia32_ls_mode(reload, ia32_mode_fpcw);
	set_ia32_frame_use(reload, IA32_FRAME_USE_32BIT);
	arch_set_irn_register(reload, &ia32_registers[REG_FPCW]);
	sched_add_before(before, reload);
	return reload;
}
ir_node *be_new_Copy(ir_node *bl, ir_node *op)
{
	ir_graph *irg  = get_irn_irg(bl);
	ir_node  *in[] = { op };
	ir_node  *res  = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), ARRAY_SIZE(in), in);
	set_copy_info(res, irg, op, arch_irn_flags_none);
	return res;
}
void init_ia32_x87_attributes(ir_node *res)
{
#ifndef NDEBUG
	ia32_attr_t *attr = get_ia32_attr(res);
	attr->attr_type |= IA32_ATTR_ia32_x87_attr_t;
#endif
	ir_graph *const irg = get_irn_irg(res);
	ia32_request_x87_sim(irg);
}
/** Creates a Phi node with 0 predecessors. */
static inline ir_node *new_rd_Phi0(dbg_info *dbgi, ir_node *block,
                                   ir_mode *mode, int pos)
{
	ir_graph *irg = get_irn_irg(block);
	ir_node  *res = new_ir_node(dbgi, irg, block, op_Phi, mode, 0, NULL);
	res->attr.phi.u.pos = pos;
	verify_new_node(res);
	return res;
}
static const char *ia32_get_old_node_name(const ir_node *irn)
{
	ir_graph       *irg  = get_irn_irg(irn);
	struct obstack *obst = be_get_be_obst(irg);
	lc_eoprintf(firm_get_arg_env(), obst, "%+F", irn);
	obstack_1grow(obst, 0);
	return (const char*)obstack_finish(obst);
}
static ir_node *create_not(dbg_info *dbgi, ir_node *node)
{
	ir_node  *block = get_nodes_block(node);
	ir_mode  *mode  = lowered_mode;
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *one   = new_rd_Const(dbgi, irg, get_mode_one(mode));
	return new_rd_Eor(dbgi, block, node, one, mode);
}
void be_info_init_irn(ir_node *const node, arch_irn_flags_t const flags,
                      arch_register_req_t const **const in_reqs,
                      unsigned const n_res)
{
	ir_graph       *const irg  = get_irn_irg(node);
	struct obstack *const obst = get_irg_obstack(irg);
	backend_info_t *const info = be_get_info(node);
	info->flags     = flags;
	info->in_reqs   = in_reqs;
	info->out_infos = NEW_ARR_DZ(reg_out_info_t, obst, n_res);
}
/**
 * Block-walker: remove Bad block predecessors and shorten Phis.
 * Phi links must be up-to-date.
 */
static void block_remove_bads(ir_node *block)
{
	/* 1. Create a new block without Bad inputs */
	ir_graph *irg     = get_irn_irg(block);
	const int max     = get_Block_n_cfgpreds(block);
	ir_node **new_in  = ALLOCAN(ir_node*, max);
	unsigned  new_max = 0;
	for (int i = 0; i < max; ++i) {
		ir_node *const block_pred = get_Block_cfgpred(block, i);
		if (!is_Bad(block_pred)) {
			new_in[new_max++] = block_pred;
		}
	}

	/* If the end block is unreachable, it might have zero predecessors. */
	if (new_max == 0) {
		ir_node *end_block = get_irg_end_block(irg);
		if (block == end_block) {
			set_irn_in(block, new_max, new_in);
			return;
		}
	}

	dbg_info  *dbgi         = get_irn_dbg_info(block);
	ir_node   *new_block    = new_rd_Block(dbgi, irg, new_max, new_in);
	ir_entity *block_entity = get_Block_entity(block);
	set_Block_entity(new_block, block_entity);

	/* 2. Remove inputs on Phis, where the block input is Bad. */
	for (ir_node *phi = get_Block_phis(block), *next; phi != NULL; phi = next) {
		next = get_Phi_next(phi);

		assert(get_irn_arity(phi) == max);

		unsigned j = 0;
		foreach_irn_in(phi, i, pred) {
			ir_node *const block_pred = get_Block_cfgpred(block, i);
			if (!is_Bad(block_pred)) {
				new_in[j++] = pred;
			}
		}
		assert(j == new_max);

		/* shortcut if only 1 phi input is left */
		if (new_max == 1) {
			ir_node *new_node = new_in[0];
			/* can happen inside unreachable endless loops */
			if (new_node == phi)
				return;
			if (get_Phi_loop(phi))
				remove_keep_alive(phi);
			exchange(phi, new_node);
		} else {
			set_irn_in(phi, new_max, new_in);
		}
	}

	exchange(block, new_block);
}
void irg_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
{
	ir_graph *irg = get_irn_irg(node);
	ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
	inc_irg_visited(irg);
	irg_walk_core(node, pre, post, env);
	ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
}
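irg_walk reserves the visited resource, bumps the graph's visited counter, and hands the actual traversal to irg_walk_core. A typical caller passes a pre handler and threads state through env. A minimal usage sketch; the count_node and count_graph_nodes names are made up for illustration, and the walk conventionally starts at the graph's End node:

#include <libfirm/firm.h>

/* Hypothetical pre-order callback: counts every node reachable from End. */
static void count_node(ir_node *node, void *env)
{
	(void)node;
	unsigned *counter = (unsigned*)env;
	++*counter;
}

static unsigned count_graph_nodes(ir_graph *irg)
{
	unsigned counter = 0;
	/* pre handler only, no post handler */
	irg_walk(get_irg_end(irg), count_node, NULL, &counter);
	return counter;
}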
ir_node *be_new_Phi0(ir_node *const block, ir_mode *const mode,
                     arch_register_req_t const *const req)
{
	ir_graph       *const irg  = get_irn_irg(block);
	ir_node        *const phi  = new_ir_node(NULL, irg, block, op_Phi, mode, 0, NULL);
	struct obstack *const obst = be_get_be_obst(irg);
	backend_info_t *const info = be_get_info(phi);
	info->out_infos        = NEW_ARR_DZ(reg_out_info_t, obst, 1);
	info->out_infos[0].req = req;
	return phi;
}
/**
 * Post-walker: prepare the graph's blocks for a new SSA construction cycle
 * by allocating new arrays.
 */
static void prepare_blocks(ir_node *block, void *env)
{
	(void)env;
	ir_graph *const irg   = get_irn_irg(block);
	unsigned  const n_loc = irg->n_loc;
	/* reset mature flag */
	if (block != get_irg_start_block(irg))
		set_Block_matured(block, 0);
	block->attr.block.graph_arr = NEW_ARR_DZ(ir_node*, get_irg_obstack(irg), n_loc);
	set_Block_phis(block, NULL);
}
ir_node *be_new_AnyVal(ir_node *block, const arch_register_class_t *cls)
{
	ir_graph *irg  = get_irn_irg(block);
	ir_mode  *mode = cls->mode;
	ir_node  *res  = new_ir_node(NULL, irg, block, op_be_AnyVal, mode, 0, NULL);
	init_node_attr(res, 1, arch_irn_flags_none);
	arch_set_irn_register_req_out(res, 0, cls->class_req);

	be_node_attr_t *attr = (be_node_attr_t*)get_irn_generic_attr(res);
	attr->exc.pin_state = op_pin_state_floats;
	return res;
}
void dom_tree_walk(ir_node *block, irg_walk_func *pre, irg_walk_func *post,
                   void *env)
{
	assert(irg_has_properties(get_irn_irg(block),
	                          IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE));
	if (pre != NULL)
		pre(block, env);

	dominates_for_each(block, p) {
		dom_tree_walk(p, pre, post, env);
	}

	if (post != NULL)
		post(block, env);
}
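Because dom_tree_walk recurses along dominates_for_each, the pre handler sees every block before the blocks it dominates, and the post handler afterwards. A minimal usage sketch, assuming dominance information is computed first; enter_block and walk_doms are hypothetical names:

#include <libfirm/firm.h>

/* Hypothetical pre handler: print each block as the dominator tree is
 * entered, so a dominator always appears before the blocks it dominates. */
static void enter_block(ir_node *block, void *env)
{
	(void)env;
	ir_printf("entering %+F\n", block);
}

static void walk_doms(ir_graph *irg)
{
	/* The walker asserts consistent dominance, so establish it first. */
	assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
	dom_tree_walk(get_irg_start_block(irg), enter_block, NULL, NULL);
}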
/** Patches Addresses to work in position-independent code. */
static void fix_pic_addresses(ir_node *const node, void *const data)
{
	(void)data;
	ir_graph      *const irg = get_irn_irg(node);
	be_main_env_t *const be  = be_get_irg_main_env(irg);
	foreach_irn_in(node, i, pred) {
		if (!is_Address(pred))
			continue;

		ir_node         *res;
		ir_entity *const entity = get_Address_entity(pred);
		dbg_info  *const dbgi   = get_irn_dbg_info(pred);
		if (i == n_Call_ptr && is_Call(node)) {
			/* Calls can jump to relative addresses, so we can directly jump
			 * to the (relatively) known call address or the trampoline */
			if (can_address_relative(entity))
				continue;

			ir_entity *const trampoline = get_trampoline(be, entity);
			res = new_rd_Address(dbgi, irg, trampoline);
		} else if (get_entity_type(entity) == get_code_type()) {
			/* Block labels can always be addressed directly. */
			continue;
		} else {
			/* Everything else is accessed relative to EIP. */
			ir_node *const block    = get_nodes_block(pred);
			ir_mode *const mode     = get_irn_mode(pred);
			ir_node *const pic_base = ia32_get_pic_base(irg);
			if (can_address_relative(entity)) {
				/* All ok now for locally constructed stuff. */
				res = new_rd_Add(dbgi, block, pic_base, pred, mode);
				/* Make sure the walker doesn't visit this add again. */
				mark_irn_visited(res);
			} else {
				/* Get entry from pic symbol segment. */
				ir_entity *const pic_symbol  = get_pic_symbol(be, entity);
				ir_node   *const pic_address = new_rd_Address(dbgi, irg, pic_symbol);
				ir_node   *const add         = new_rd_Add(dbgi, block, pic_base, pic_address, mode);
				mark_irn_visited(add);

				/* We need an extra indirection for global data outside our
				 * current module. The loads are always safe and can therefore
				 * float and need no memory input */
				ir_type *const type  = get_entity_type(entity);
				ir_node *const nomem = get_irg_no_mem(irg);
				ir_node *const load  = new_rd_Load(dbgi, block, nomem, add, mode, type, cons_floats);
				res = new_r_Proj(load, mode, pn_Load_res);
			}
		}
		set_irn_n(node, i, res);
	}
}
/**
 * Initializes the node's attributes.
 */
static void init_sparc_attributes(ir_node *node, arch_irn_flags_t flags,
                                  const arch_register_req_t **in_reqs,
                                  int n_res)
{
	arch_set_irn_flags(node, flags);
	arch_set_irn_register_reqs_in(node, in_reqs);

	backend_info_t *info = be_get_info(node);
	ir_graph       *irg  = get_irn_irg(node);
	struct obstack *obst = get_irg_obstack(irg);
	info->out_infos = NEW_ARR_DZ(reg_out_info_t, obst, n_res);
}
void dmemory_lower_Arraylength(ir_node *arraylength)
{
	ir_node  *array_ref = get_Arraylength_arrayref(arraylength);
	ir_node  *block     = get_nodes_block(arraylength);
	ir_graph *irg       = get_irn_irg(block);
	ir_node  *cur_mem   = get_Arraylength_mem(arraylength);
	ir_node  *len       = (*dmemory_model.get_arraylength)(array_ref, irg, block, &cur_mem);
	turn_into_tuple(arraylength, pn_Arraylength_max);
	set_irn_n(arraylength, pn_Arraylength_M, cur_mem);
	set_irn_n(arraylength, pn_Arraylength_res, len);
}
void be_make_start_out(be_start_info_t *const info, ir_node *const start,
                       unsigned const pos, arch_register_t const *const reg,
                       bool const ignore)
{
	info->pos = pos;
	info->irn = NULL;

	arch_register_req_t const *const req = ignore
		? be_create_reg_req(be_get_be_obst(get_irn_irg(start)), reg, true)
		: reg->single_req;
	arch_set_irn_register_req_out(start, pos, req);
	arch_set_irn_register_out(start, pos, reg);
}
ir_node *be_new_Asm(dbg_info *const dbgi, ir_node *const block,
                    int const n_ins, ir_node **const ins,
                    arch_register_req_t const **const in_reqs,
                    int const n_outs, ident *const text, void *const operands)
{
	ir_graph *const irg  = get_irn_irg(block);
	ir_node  *const asmn = new_ir_node(dbgi, irg, block, op_be_Asm, mode_T, n_ins, ins);
	be_info_init_irn(asmn, arch_irn_flags_none, in_reqs, n_outs);

	be_asm_attr_t *const attr = (be_asm_attr_t*)get_irn_generic_attr(asmn);
	attr->text     = text;
	attr->operands = operands;
	return asmn;
}
/**
 * Lower 64-bit addition: a 32-bit add for the lower parts, an add with
 * carry for the higher parts. If the carry's value is known, fold it
 * into the upper add.
 */
static void ia32_lower_add64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbg        = get_irn_dbg_info(node);
	ir_node  *block      = get_nodes_block(node);
	ir_node  *left       = get_Add_left(node);
	ir_node  *right      = get_Add_right(node);
	ir_node  *left_low   = get_lowered_low(left);
	ir_node  *left_high  = get_lowered_high(left);
	ir_node  *right_low  = get_lowered_low(right);
	ir_node  *right_high = get_lowered_high(right);
	ir_mode  *low_mode   = get_irn_mode(left_low);
	ir_mode  *high_mode  = get_irn_mode(left_high);
	carry_result cr      = lower_add_carry(left, right, low_mode);

	assert(get_irn_mode(left_low)  == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (cr == no_carry) {
		ir_node *add_low  = new_rd_Add(dbg, block, left_low,  right_low,  low_mode);
		ir_node *add_high = new_rd_Add(dbg, block, left_high, right_high, high_mode);
		ir_set_dw_lowered(node, add_low, add_high);
	} else if (cr == must_carry && (is_Const(left_high) || is_Const(right_high))) {
		// We cannot assume that left_high and right_high form a normalized Add.
		ir_node *constant;
		ir_node *other;
		if (is_Const(left_high)) {
			constant = left_high;
			other    = right_high;
		} else {
			constant = right_high;
			other    = left_high;
		}

		ir_graph *irg            = get_irn_irg(right_high);
		ir_node  *one            = new_rd_Const(dbg, irg, get_mode_one(high_mode));
		ir_node  *const_plus_one = new_rd_Add(dbg, block, constant, one, high_mode);
		ir_node  *add_high       = new_rd_Add(dbg, block, other, const_plus_one, high_mode);
		ir_node  *add_low        = new_rd_Add(dbg, block, left_low, right_low, low_mode);
		ir_set_dw_lowered(node, add_low, add_high);
	} else {
		/* l_res = a_l + b_l */
		ir_node *add_low    = new_bd_ia32_l_Add(dbg, block, left_low, right_low);
		ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node *res_low    = new_r_Proj(add_low, ia32_mode_gp, pn_ia32_l_Add_res);
		ir_node *flags      = new_r_Proj(add_low, mode_flags, pn_ia32_l_Add_flags);

		/* h_res = a_h + b_h + carry */
		ir_node *add_high = new_bd_ia32_l_Adc(dbg, block, left_high, right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, add_high);
	}
}
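The three cases above mirror what plain 32-bit code for a 64-bit add has to do: add the low words, materialize the carry, and feed it into the high-word add (here done via the flags Proj wired into l_Adc). A small stand-alone C model of that arithmetic, including the constant fold used in the must_carry branch; it is a sketch of the idea, not libfirm code:

#include <assert.h>
#include <stdint.h>

/* Add the low words, derive the carry, feed it into the high-word add --
 * exactly what the l_Add/l_Adc pair does via the flags register. */
static uint64_t add64_split(uint32_t a_l, uint32_t a_h,
                            uint32_t b_l, uint32_t b_h)
{
	uint32_t l_res = a_l + b_l;
	uint32_t carry = l_res < a_l; /* unsigned overflow check */
	uint32_t h_res = a_h + b_h + carry;
	return ((uint64_t)h_res << 32) | l_res;
}

int main(void)
{
	/* Carry case: 0xFFFFFFFF + 1 in the low words carries into the high. */
	assert(add64_split(0xFFFFFFFFu, 0, 1, 0) == 0x100000000u);
	/* The must_carry fold: with a known carry of 1 and a constant high
	 * word c, a_h + c + 1 can be computed as a_h + (c + 1) up front. */
	assert(add64_split(0xFFFFFFFFu, 5, 1, 7) == ((uint64_t)13 << 32));
	return 0;
}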
/**
 * This function returns the last definition of a value. In case
 * this value was last defined in a previous block, Phi nodes are
 * inserted. If the part of the firm graph containing the definition
 * is not yet constructed, a dummy Phi node is returned.
 *
 * @param block  the current block
 * @param pos    the value number of the value searched
 * @param mode   the mode of this value (needed for Phi construction)
 */
static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode)
{
	ir_node *res = block->attr.block.graph_arr[pos];
	if (res != NULL)
		return res;

	/* in a matured block we can immediately determine the phi arguments */
	if (get_Block_matured(block)) {
		ir_graph *const irg   = get_irn_irg(block);
		int       const arity = get_irn_arity(block);

		/* no predecessors: use unknown value */
		if (arity == 0) {
			if (block == get_irg_start_block(irg)) {
				if (default_initialize_local_variable != NULL) {
					ir_node *rem = get_r_cur_block(irg);
					set_r_cur_block(irg, block);
					res = default_initialize_local_variable(irg, mode, pos - 1);
					set_r_cur_block(irg, rem);
				} else {
					res = new_r_Unknown(irg, mode);
				}
			} else {
				goto bad; /* unreachable block, use Bad */
			}
		/* one predecessor: just use its value */
		} else if (arity == 1) {
			ir_node *cfgpred = get_Block_cfgpred(block, 0);
			if (is_Bad(cfgpred)) {
bad:
				res = new_r_Bad(irg, mode);
			} else {
				ir_node *cfgpred_block = get_nodes_block(cfgpred);
				res = get_r_value_internal(cfgpred_block, pos, mode);
			}
		} else {
			/* multiple predecessors: construct Phi */
			res = new_rd_Phi0(NULL, block, mode, pos);
			/* enter phi0 into our variable value table to break cycles
			 * arising from set_phi_arguments */
			block->attr.block.graph_arr[pos] = res;
			res = set_phi_arguments(res, pos);
		}
	} else {
		/* in case of immature block we have to keep a Phi0 */
		res = new_rd_Phi0(NULL, block, mode, pos);
		/* enqueue phi so we can set arguments once the block matures */
		res->attr.phi.next     = block->attr.block.phis;
		block->attr.block.phis = res;
	}
	block->attr.block.graph_arr[pos] = res;
	return res;
}
static ir_node *arm_new_spill(ir_node *value, ir_node *after)
{
	ir_node  *block = get_block(after);
	ir_graph *irg   = get_irn_irg(after);
	ir_node  *frame = get_irg_frame(irg);
	ir_node  *mem   = get_irg_no_mem(irg);
	ir_mode  *mode  = get_irn_mode(value);
	ir_node  *store = new_bd_arm_Str(NULL, block, frame, value, mem, mode,
	                                 NULL, false, 0, true);
	arch_add_irn_flags(store, arch_irn_flag_spill);
	sched_add_after(after, store);
	return store;
}
static ir_node *arm_new_reload(ir_node *value, ir_node *spill, ir_node *before)
{
	ir_node  *block = get_block(before);
	ir_graph *irg   = get_irn_irg(before);
	ir_node  *frame = get_irg_frame(irg);
	ir_mode  *mode  = get_irn_mode(value);
	ir_node  *load  = new_bd_arm_Ldr(NULL, block, frame, spill, mode,
	                                 NULL, false, 0, true);
	ir_node  *proj  = new_r_Proj(load, mode, pn_arm_Ldr_res);
	arch_add_irn_flags(load, arch_irn_flag_reload);
	sched_add_before(before, load);
	return proj;
}
ir_node *be_new_MemPerm(ir_node *const block, int n, ir_node *const *const in)
{
	ir_graph *const irg = get_irn_irg(block);
	ir_node  *const irn = new_ir_node(NULL, irg, block, op_be_MemPerm, mode_T, n, in);
	init_node_attr(irn, n, arch_irn_flags_none);

	be_memperm_attr_t *attr = (be_memperm_attr_t*)get_irn_generic_attr(irn);
	attr->in_entities  = OALLOCNZ(get_irg_obstack(irg), ir_entity*, n);
	attr->out_entities = OALLOCNZ(get_irg_obstack(irg), ir_entity*, n);
	attr->offset       = 0;
	return irn;
}
/**
 * Lower 64-bit subtraction: a 32-bit sub for the lower parts, a sub
 * with borrow for the higher parts. If the borrow's value is known,
 * fold it into the upper sub.
 */
static void ia32_lower_sub64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbg        = get_irn_dbg_info(node);
	ir_node  *block      = get_nodes_block(node);
	ir_node  *left       = get_Sub_left(node);
	ir_node  *right      = get_Sub_right(node);
	ir_node  *left_low   = get_lowered_low(left);
	ir_node  *left_high  = get_lowered_high(left);
	ir_node  *right_low  = get_lowered_low(right);
	ir_node  *right_high = get_lowered_high(right);
	ir_mode  *low_mode   = get_irn_mode(left_low);
	ir_mode  *high_mode  = get_irn_mode(left_high);
	carry_result cr      = lower_sub_borrow(left, right, low_mode);

	assert(get_irn_mode(left_low)  == get_irn_mode(right_low));
	assert(get_irn_mode(left_high) == get_irn_mode(right_high));

	if (cr == no_carry) {
		ir_node *sub_low  = new_rd_Sub(dbg, block, left_low,  right_low,  low_mode);
		ir_node *sub_high = new_rd_Sub(dbg, block, left_high, right_high, high_mode);
		ir_set_dw_lowered(node, sub_low, sub_high);
	} else if (cr == must_carry && (is_Const(left_high) || is_Const(right_high))) {
		ir_node  *sub_high;
		ir_graph *irg = get_irn_irg(right_high);
		ir_node  *one = new_rd_Const(dbg, irg, get_mode_one(high_mode));
		if (is_Const(right_high)) {
			ir_node *new_const = new_rd_Add(dbg, block, right_high, one, high_mode);
			sub_high = new_rd_Sub(dbg, block, left_high, new_const, high_mode);
		} else if (is_Const(left_high)) {
			ir_node *new_const = new_rd_Sub(dbg, block, left_high, one, high_mode);
			sub_high = new_rd_Sub(dbg, block, new_const, right_high, high_mode);
		} else {
			panic("logic error");
		}
		ir_node *sub_low = new_rd_Sub(dbg, block, left_low, right_low, low_mode);
		ir_set_dw_lowered(node, sub_low, sub_high);
	} else {
		/* l_res = a_l - b_l */
		ir_node *sub_low    = new_bd_ia32_l_Sub(dbg, block, left_low, right_low);
		ir_mode *mode_flags = ia32_reg_classes[CLASS_ia32_flags].mode;
		ir_node *res_low    = new_r_Proj(sub_low, ia32_mode_gp, pn_ia32_l_Sub_res);
		ir_node *flags      = new_r_Proj(sub_low, mode_flags, pn_ia32_l_Sub_flags);

		/* h_res = a_h - b_h - carry */
		ir_node *sub_high = new_bd_ia32_l_Sbb(dbg, block, left_high, right_high, flags, mode);
		ir_set_dw_lowered(node, res_low, sub_high);
	}
}
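The same scheme works for subtraction with the carry replaced by a borrow; the must_carry branch again folds the known borrow into the constant high word. A matching stand-alone C model, again a sketch rather than libfirm code:

#include <assert.h>
#include <stdint.h>

/* Subtract the low words, derive the borrow, feed it into the high-word
 * subtraction -- the l_Sub/l_Sbb pair in the lowering above. */
static uint64_t sub64_split(uint32_t a_l, uint32_t a_h,
                            uint32_t b_l, uint32_t b_h)
{
	uint32_t l_res  = a_l - b_l;
	uint32_t borrow = a_l < b_l; /* unsigned underflow check */
	uint32_t h_res  = a_h - b_h - borrow;
	return ((uint64_t)h_res << 32) | l_res;
}

int main(void)
{
	/* Borrow case: 0x100000000 - 1 = 0xFFFFFFFF. */
	assert(sub64_split(0, 1, 1, 0) == 0xFFFFFFFFu);
	/* The must_carry fold: with a known borrow of 1 and a constant high
	 * word c, a_h - c - 1 can be computed as a_h - (c + 1) up front. */
	assert(sub64_split(0, 9, 1, 3) == (((uint64_t)5 << 32) | 0xFFFFFFFFu));
	return 0;
}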
static void introduce_epilog(ir_node *ret)
{
	arch_register_t const *const sp_reg = &arm_registers[REG_SP];
	assert(arch_get_irn_register_req_in(ret, n_arm_Return_sp) == sp_reg->single_req);

	ir_node  *const sp         = get_irn_n(ret, n_arm_Return_sp);
	ir_node  *const block      = get_nodes_block(ret);
	ir_graph *const irg        = get_irn_irg(ret);
	ir_type  *const frame_type = get_irg_frame_type(irg);
	unsigned  const frame_size = get_type_size_bytes(frame_type);
	ir_node  *const incsp      = be_new_IncSP(sp_reg, block, sp, -(int)frame_size, 0);
	set_irn_n(ret, n_arm_Return_sp, incsp);
	sched_add_before(ret, incsp);
}
ir_node *be_new_Keep(ir_node *const block, int const n, ir_node *const *const in)
{
	ir_graph *irg = get_irn_irg(block);
	ir_node  *res = new_ir_node(NULL, irg, block, op_be_Keep, mode_ANY, n, in);
	init_node_attr(res, 1, arch_irn_flag_schedule_first);

	for (int i = 0; i < n; ++i) {
		arch_register_req_t const *const req = arch_get_irn_register_req(in[i]);
		be_node_set_register_req_in(res, i, req->cls->class_req);
	}
	keep_alive(res);
	return res;
}
static ir_node *transform_end(ir_node *node)
{
	/* Do not transform predecessors yet to keep the pre-transform
	 * phase from visiting the whole graph. */
	ir_node *const block   = be_transform_nodes_block(node);
	ir_node *const new_end = exact_copy(node);
	set_nodes_block(new_end, block);

	ir_graph *const irg = get_irn_irg(new_end);
	set_irg_end(irg, new_end);
	be_enqueue_preds(node);
	return new_end;
}