/* The 64-bit version of libgcc no longer provides some builtin helpers
 * for 32-bit operands (__<builtin>si2); widen such operands to the
 * machine word size so the word-sized variants can be used instead. */
static void widen_builtin(ir_node *node)
{
	ir_type *method  = get_Builtin_type(node);
	ir_type *paramtp = get_method_param_type(method, 0);

	/* Arguments that are already at least machine sized need no widening. */
	if (get_type_size(paramtp) >= ir_target_pointer_size())
		return;

	/* Only builtins lacking a 32-bit libgcc variant are affected. */
	switch (get_Builtin_kind(node)) {
	case ir_bk_clz:
	case ir_bk_ctz:
	case ir_bk_ffs:
	case ir_bk_parity:
	case ir_bk_popcount:
		break;
	default:
		return;
	}

	/* Insert a Conv of the first builtin argument to the word-sized mode. */
	ir_mode  *wide_mode = get_reference_offset_mode(mode_P);
	dbg_info *dbgi      = get_irn_dbg_info(node);
	ir_node  *block     = get_nodes_block(node);
	ir_node  *argument  = get_irn_n(node, n_Builtin_max + 1);
	ir_node  *widened   = new_rd_Conv(dbgi, block, argument, wide_mode);
	set_irn_n(node, n_Builtin_max + 1, widened);

	/* Build a matching method type with the widened parameter type. */
	ir_type *wide_param = get_type_for_mode(wide_mode);
	ir_type *result_tp  = get_method_res_type(method, 0);
	ir_type *new_method = new_type_method(1, 1, false, cc_cdecl_set,
	                                      mtp_no_property);
	set_method_param_type(new_method, 0, wide_param);
	set_method_res_type(new_method, 0, result_tp);
	set_Builtin_type(node, new_method);
}
/* Fold a constant expression down to the address (as a tarval) that it
 * denotes.  Handles member selection, array indexing and dereference;
 * panics on any other expression kind. */
static ir_tarval *fold_expression_to_address(expression_t const *const expr)
{
	switch (expr->kind) {
	case EXPR_SELECT: {
		select_expression_t const *const select = &expr->select;
		type_t *const compound_type
			= skip_typeref(select->compound->base.type);
		/* a->b starts from the folded pointer value,
		 * a.b from the folded address of a. */
		ir_tarval *base;
		if (is_type_pointer(compound_type)) {
			base = fold_expression(select->compound);
		} else {
			base = fold_expression_to_address(select->compound);
		}
		ir_mode   *const addr_mode   = get_tarval_mode(base);
		ir_mode   *const offset_mode = get_reference_offset_mode(addr_mode);
		ir_tarval *const member_ofs  = new_tarval_from_long(
			select->compound_entry->compound_member.offset, offset_mode);
		return tarval_add(base, member_ofs);
	}

	case EXPR_ARRAY_ACCESS: {
		ir_tarval *const base
			= fold_expression_to_address(expr->array_access.array_ref);
		ir_tarval *const index      = fold_expression(expr->array_access.index);
		ir_mode   *const size_mode  = get_ir_mode_arithmetic(type_size_t);
		ir_tarval *const index_conv = tarval_convert_to(index, size_mode);
		type_t    *const elem_type
			= skip_typeref(expr->array_access.array_ref->base.type);
		ir_tarval *const elem_size
			= get_type_size_tarval(elem_type, size_mode);
		/* address = base + index * sizeof(element) */
		return tarval_add(base, tarval_mul(index_conv, elem_size));
	}

	case EXPR_UNARY_DEREFERENCE:
		/* *p: the address is simply the folded pointer value. */
		return fold_expression(expr->unary.value);

	default:
		panic("unexpected expression kind");
	}
}
/* Return the address of an array's first data element: the array
 * reference advanced by array_header_size bytes. */
ir_node *gcji_array_data_addr(ir_node *addr)
{
	ir_mode *ref_mode    = get_irn_mode(addr);
	ir_mode *offset_mode = get_reference_offset_mode(ref_mode);
	ir_node *header_size = new_Const_long(offset_mode, array_header_size);
	return new_Add(addr, header_size);
}
/* Build (in the const-code graph) a reference to the class vtable of
 * @p type, advanced to the entry the object's vptr should point at.
 * Returns NULL when the class has no vtable entity. */
static ir_node *get_vtable_ref(ir_type *type)
{
	ir_entity *vtable_entity = oo_get_class_vtable_entity(type);
	if (vtable_entity == NULL)
		return NULL;

	ir_graph *ccode_irg   = get_const_code_irg();
	ir_node  *vtable_addr = new_r_Address(ccode_irg, vtable_entity);
	unsigned  byte_offset = ddispatch_get_vptr_points_to_index()
	                      * get_mode_size_bytes(mode_reference);
	ir_mode  *offset_mode = get_reference_offset_mode(mode_reference);
	ir_node  *offset_cnst = new_r_Const_long(ccode_irg, offset_mode,
	                                         byte_offset);
	ir_node  *block       = get_r_cur_block(ccode_irg);
	return new_r_Add(block, vtable_addr, offset_cnst);
}
/**
 * Turn a small CopyB node into a series of Load/Store nodes.
 *
 * Copies are emitted in chunks, starting with the widest mode the
 * type's alignment allows (or the native word size when misaligned
 * accesses are permitted) and halving the chunk size until the whole
 * type size is covered.  The CopyB is finally exchanged for the
 * resulting memory value.
 */
static void lower_small_copyb_node(ir_node *irn)
{
	ir_graph *irg      = get_irn_irg(irn);
	dbg_info *dbgi     = get_irn_dbg_info(irn);
	ir_node  *block    = get_nodes_block(irn);
	ir_type  *tp       = get_CopyB_type(irn);
	ir_node  *addr_src = get_CopyB_src(irn);
	ir_node  *addr_dst = get_CopyB_dst(irn);
	ir_node  *mem      = get_CopyB_mem(irn);
	ir_mode  *mode_ref = get_irn_mode(addr_src);
	/* Loop-invariant: the integer mode used for address offsets. */
	ir_mode  *mode_ref_int = get_reference_offset_mode(mode_ref);
	unsigned  mode_bytes   = allow_misalignments ? native_mode_bytes
	                                             : get_type_alignment(tp);
	unsigned  size         = get_type_size(tp);
	unsigned  offset       = 0;
	bool      is_volatile  = get_CopyB_volatility(irn)
	                       == volatility_is_volatile;
	ir_cons_flags flags    = is_volatile ? cons_volatile : cons_none;

	while (offset < size) {
		ir_mode *mode = get_ir_mode(mode_bytes);
		/* Copy as many chunks of the current width as still fit. */
		for (; offset + mode_bytes <= size; offset += mode_bytes) {
			/* One offset constant serves both source and destination
			 * address computations. */
			ir_node *addr_const = new_r_Const_long(irg, mode_ref_int, offset);
			ir_node *src_add    = new_r_Add(block, addr_src, addr_const);
			ir_node *load       = new_rd_Load(dbgi, block, mem, src_add,
			                                  mode, tp, flags);
			ir_node *load_res   = new_r_Proj(load, mode, pn_Load_res);
			ir_node *load_mem   = new_r_Proj(load, mode_M, pn_Load_M);

			ir_node *dst_add    = new_r_Add(block, addr_dst, addr_const);
			ir_node *store      = new_rd_Store(dbgi, block, load_mem, dst_add,
			                                   load_res, tp, flags);
			mem = new_r_Proj(store, mode_M, pn_Store_M);
		}
		/* Halve the chunk width to cover the remaining tail bytes. */
		mode_bytes /= 2;
	}

	exchange(irn, mem);
}
/* Default lowering of a va_arg Builtin: fetch the argument the va_list
 * pointer @c ap currently refers to, then advance @c ap past it.
 * The node is turned into a tuple (memory, result, advanced ap). */
void be_default_lower_va_arg(ir_node *const node, bool const compound_is_ptr,
                             unsigned const stack_param_align)
{
	ir_node  *const block    = get_nodes_block(node);
	dbg_info *const dbgi     = get_irn_dbg_info(node);
	ir_graph *const irg      = get_irn_irg(node);
	ir_type        *aptype   = get_method_res_type(get_Builtin_type(node), 0);
	ir_node  *const ap       = get_irn_n(node, 1);
	ir_node  *const node_mem = get_Builtin_mem(node);
	ir_mode        *apmode   = get_type_mode(aptype);

	/* A compound passed by pointer is read like a plain pointer value. */
	if (apmode == NULL && compound_is_ptr) {
		apmode = mode_P;
		aptype = get_type_for_mode(apmode);
	}

	ir_node *res;
	ir_node *new_mem;
	if (apmode != NULL) {
		/* Value with a mode: load it from the address in ap. */
		ir_node *const load = new_rd_Load(dbgi, block, node_mem, ap, apmode,
		                                  aptype, cons_none);
		res     = new_r_Proj(load, apmode, pn_Load_res);
		new_mem = new_r_Proj(load, mode_M, pn_Load_M);
	} else {
		/* aptype has no associated mode, so it is represented as a
		 * pointer: hand back the address itself. */
		res     = ap;
		new_mem = node_mem;
	}

	/* Advance ap past the argument, honouring the stack alignment. */
	unsigned const round_up    = round_up2(get_type_size(aptype),
	                                       stack_param_align);
	ir_mode *const offset_mode = get_reference_offset_mode(mode_P);
	ir_node *const offset      = new_r_Const_long(irg, offset_mode, round_up);
	ir_node *const new_ap      = new_rd_Add(dbgi, block, ap, offset);

	ir_node *const in[] = { new_mem, res, new_ap };
	turn_into_tuple(node, ARRAY_SIZE(in), in);
}