static void create_divmod_intrinsics(ir_mode *mode_unsigned,
                                     ir_mode *mode_signed)
{
    ir_type *const tp_unsigned = get_type_for_mode(mode_unsigned);
    uldivmod = make_divmod("__aeabi_uldivmod", tp_unsigned, tp_unsigned);

    /* For the signed 64-bit case the high word is signed and the low word
     * unsigned; the even register of a pair holds the low word on
     * little-endian and the high word on big-endian. */
    ir_type *const tp_signed = get_type_for_mode(mode_signed);
    ir_type *const even = arm_cg_config.big_endian ? tp_signed : tp_unsigned;
    ir_type *const odd  = arm_cg_config.big_endian ? tp_unsigned : tp_signed;
    ldivmod = make_divmod("__aeabi_ldivmod", even, odd);
}
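/* make_divmod is not shown in this excerpt. A minimal sketch of a plausible
 * implementation, assuming the lowered representation passes each 64-bit
 * operand and result as an (even, odd) pair of 32-bit words; the 4-in/4-out
 * shape is an assumption, not taken from the source:
 */
static ir_entity *make_divmod(char const *const name, ir_type *const even,
                              ir_type *const odd)
{
    /* two 64-bit operands in, {quotient, remainder} out,
     * each value split into two 32-bit words */
    ir_type *const mtp = new_type_method(4, 4, false, cc_cdecl_set,
                                         mtp_no_property);
    set_method_param_type(mtp, 0, even);
    set_method_param_type(mtp, 1, odd);
    set_method_param_type(mtp, 2, even);
    set_method_param_type(mtp, 3, odd);
    set_method_res_type(mtp, 0, even);
    set_method_res_type(mtp, 1, odd);
    set_method_res_type(mtp, 2, even);
    set_method_res_type(mtp, 3, odd);
    return create_compilerlib_entity(name, mtp);
}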
static ir_type *get_memcpy_methodtype(void)
{
    ir_type *tp          = new_type_method(3, 1, false, cc_cdecl_set,
                                           mtp_no_property);
    ir_mode *size_t_mode = get_ir_mode(native_mode_bytes);

    set_method_param_type(tp, 0, get_type_for_mode(mode_P));
    set_method_param_type(tp, 1, get_type_for_mode(mode_P));
    set_method_param_type(tp, 2, get_type_for_mode(size_t_mode));
    set_method_res_type  (tp, 0, get_type_for_mode(mode_P));

    return tp;
}
static ir_type *get_memcpy_methodtype(void)
{
    ir_type *tp          = new_type_method(3, 1);
    ir_mode *size_t_mode = get_ir_mode(native_mode_bytes);

    set_method_param_type(tp, 0, get_type_for_mode(mode_P));
    set_method_param_type(tp, 1, get_type_for_mode(mode_P));
    set_method_param_type(tp, 2, get_type_for_mode(size_t_mode));
    set_method_res_type  (tp, 0, get_type_for_mode(mode_P));

    return tp;
}
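/* For reference, the method type built by both variants corresponds to the
 * standard C prototype; the two versions differ only in the new_type_method
 * signature of the libfirm revision they were written against:
 *
 *     void *memcpy(void *dest, const void *src, size_t n);
 */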
/*
 * The 64-bit version of libgcc no longer contains some builtin
 * functions for 32-bit values (__<builtin>si2).
 */
static void widen_builtin(ir_node *node)
{
    ir_type *mtp  = get_Builtin_type(node);
    ir_type *arg1 = get_method_param_type(mtp, 0);

    // Nothing to do if the argument size is at least the machine size.
    if (get_type_size(arg1) >= ir_target_pointer_size())
        return;

    // Only touch builtins with no 32-bit version.
    ir_builtin_kind kind = get_Builtin_kind(node);
    if (kind != ir_bk_clz    && kind != ir_bk_ctz    &&
        kind != ir_bk_ffs    && kind != ir_bk_parity &&
        kind != ir_bk_popcount) {
        return;
    }

    ir_mode  *target_mode = get_reference_offset_mode(mode_P);
    dbg_info *dbgi        = get_irn_dbg_info(node);
    ir_node  *block       = get_nodes_block(node);
    ir_node  *op          = get_irn_n(node, n_Builtin_max + 1);

    // Widen the operand to machine size and rebuild the method type.
    ir_node *conv = new_rd_Conv(dbgi, block, op, target_mode);
    set_irn_n(node, n_Builtin_max + 1, conv);

    ir_type *new_arg1   = get_type_for_mode(target_mode);
    ir_type *new_result = get_method_res_type(mtp, 0);
    ir_type *new_type   = new_type_method(1, 1, false, cc_cdecl_set,
                                          mtp_no_property);
    set_method_param_type(new_type, 0, new_arg1);
    set_method_res_type(new_type, 0, new_result);
    set_Builtin_type(node, new_type);
}
static ir_node *gcji_get_arraylength(dbg_info *dbgi, ir_node *block,
                                     ir_node *arrayref, ir_node **mem)
{
    ir_node *addr    = new_r_Member(block, arrayref, gcj_array_length);
    ir_node *load    = new_rd_Load(dbgi, block, *mem, addr, mode_int,
                                   get_type_for_mode(mode_int), cons_none);
    ir_node *new_mem = new_r_Proj(load, mode_M, pn_Load_M);
    ir_node *res     = new_r_Proj(load, mode_int, pn_Load_res);
    *mem = new_mem;
    return res;
}
static void arm_create_runtime_entities(void)
{
    if (divsi3 != NULL)
        return;

    ir_mode *mode_int  = new_int_mode("arm_be_int", ARM_MACHINE_SIZE,
                                      true, ARM_MODULO_SHIFT);
    ir_mode *mode_uint = new_int_mode("arm_be_int", ARM_MACHINE_SIZE,
                                      false, ARM_MODULO_SHIFT);
    ir_type *int_tp  = get_type_for_mode(mode_int);
    ir_type *uint_tp = get_type_for_mode(mode_uint);

    ir_type *const mtps = make_divmod_type(int_tp);
    divsi3 = create_compilerlib_entity("__divsi3", mtps);
    modsi3 = create_compilerlib_entity("__modsi3", mtps);

    ir_type *const mtpu = make_divmod_type(uint_tp);
    udivsi3 = create_compilerlib_entity("__udivsi3", mtpu);
    umodsi3 = create_compilerlib_entity("__umodsi3", mtpu);
}
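/* make_divmod_type is not shown in this excerpt. Judging from the expanded
 * variant of this function further below, it presumably builds the
 * int(int, int) signature shared by __divsi3/__modsi3 (a sketch, not the
 * verified helper):
 */
static ir_type *make_divmod_type(ir_type *const tp)
{
    ir_type *const mtp = new_type_method(2, 1, false, cc_cdecl_set,
                                         mtp_no_property);
    set_method_param_type(mtp, 0, tp);
    set_method_param_type(mtp, 1, tp);
    set_method_res_type(mtp, 0, tp);
    return mtp;
}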
static ir_type *create_utf8_const_type(void)
{
    ir_type *type_byte           = get_type_for_mode(mode_Bu);
    ir_type *type_var_char_array = new_type_array(type_byte, 0);

    ident   *id   = new_id_from_str("utf8_const");
    ir_type *type = new_type_struct(id);
    add_compound_member(type, "hash", type_ushort);
    add_compound_member(type, "len",  type_ushort);
    add_compound_member(type, "data", type_var_char_array);
    default_layout_compound_type(type);
    return type;
}
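/* The Firm struct built above mirrors a C layout along these lines; the
 * field names follow the add_compound_member calls, while the 16-bit fields
 * and the flexible array member are assumptions based on type_ushort and
 * the unbounded byte array: */
typedef struct {
    unsigned short hash;   /* hash of the string            */
    unsigned short len;    /* payload length in bytes       */
    unsigned char  data[]; /* variable-length UTF-8 payload */
} utf8_const;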
static void arm_create_runtime_entities(void)
{
    if (divsi3 != NULL)
        return;

    ir_mode *mode_int  = new_int_mode("arm_be_int", irma_twos_complement,
                                      ARM_MACHINE_SIZE, true,
                                      ARM_MODULO_SHIFT);
    ir_mode *mode_uint = new_int_mode("arm_be_int", irma_twos_complement,
                                      ARM_MACHINE_SIZE, false,
                                      ARM_MODULO_SHIFT);
    ir_type *int_tp  = get_type_for_mode(mode_int);
    ir_type *uint_tp = get_type_for_mode(mode_uint);

    ir_type *tp_divsi3 = new_type_method(2, 1);
    set_method_param_type(tp_divsi3, 0, int_tp);
    set_method_param_type(tp_divsi3, 1, int_tp);
    set_method_res_type(tp_divsi3, 0, int_tp);
    divsi3 = create_compilerlib_entity(new_id_from_str("__divsi3"), tp_divsi3);

    ir_type *tp_udivsi3 = new_type_method(2, 1);
    set_method_param_type(tp_udivsi3, 0, uint_tp);
    set_method_param_type(tp_udivsi3, 1, uint_tp);
    set_method_res_type(tp_udivsi3, 0, uint_tp);
    udivsi3 = create_compilerlib_entity(new_id_from_str("__udivsi3"), tp_udivsi3);

    ir_type *tp_modsi3 = new_type_method(2, 1);
    set_method_param_type(tp_modsi3, 0, int_tp);
    set_method_param_type(tp_modsi3, 1, int_tp);
    set_method_res_type(tp_modsi3, 0, int_tp);
    modsi3 = create_compilerlib_entity(new_id_from_str("__modsi3"), tp_modsi3);

    ir_type *tp_umodsi3 = new_type_method(2, 1);
    set_method_param_type(tp_umodsi3, 0, uint_tp);
    set_method_param_type(tp_umodsi3, 1, uint_tp);
    set_method_res_type(tp_umodsi3, 0, uint_tp);
    umodsi3 = create_compilerlib_entity(new_id_from_str("__umodsi3"), tp_umodsi3);
}
static void arm_collect_frame_entity_nodes(ir_node *node, void *data)
{
    if (!is_frame_load(node))
        return;

    const arm_load_store_attr_t *attr = get_arm_load_store_attr_const(node);
    if (!attr->is_frame_entity)
        return;

    const ir_entity *entity = attr->entity;
    if (entity != NULL)
        return;

    const ir_mode *mode = attr->load_store_mode;
    const ir_type *type = get_type_for_mode(mode);
    be_fec_env_t  *env  = (be_fec_env_t*)data;
    be_load_needs_frame_entity(env, node, type);
}
/**
 * Perform some fixups for variadic functions.
 * To make the rest of the frontend code easier to understand, we add "dummy"
 * parameters until every register used for parameter passing is covered by
 * a parameter (otherwise the backend would not store the values of the
 * register parameters into memory for the va_arg magic).
 */
static bool sparc_variadic_fixups(ir_graph *const irg,
                                  calling_convention_t *const cconv)
{
    ir_entity *entity = get_irg_entity(irg);
    ir_type   *mtp    = get_entity_type(entity);
    if (!is_method_variadic(mtp))
        return false;

    if (cconv->n_param_regs >= SPARC_N_PARAM_REGS)
        return false;

    size_t const n_params = get_method_n_params(mtp);
    size_t const n_ress   = get_method_n_ress(mtp);
    size_t const new_n_params
        = n_params + (SPARC_N_PARAM_REGS - cconv->n_param_regs);
    unsigned const cc_mask = get_method_calling_convention(mtp);
    mtp_additional_properties const props
        = get_method_additional_properties(mtp);
    ir_type *const new_mtp
        = new_type_method(new_n_params, n_ress, true, cc_mask, props);

    type_dbg_info *const dbgi = get_type_dbg_info(mtp);
    set_type_dbg_info(new_mtp, dbgi);

    for (size_t i = 0; i < n_ress; ++i) {
        ir_type *type = get_method_res_type(mtp, i);
        set_method_res_type(new_mtp, i, type);
    }
    for (size_t i = 0; i < n_params; ++i) {
        ir_type *type = get_method_param_type(mtp, i);
        set_method_param_type(new_mtp, i, type);
    }

    ir_type *const frame_type  = get_irg_frame_type(irg);
    ir_mode *const gp_reg_mode = sparc_reg_classes[CLASS_sparc_gp].mode;
    ir_type *const gp_reg_type = get_type_for_mode(gp_reg_mode);
    for (size_t i = n_params; i < new_n_params; ++i) {
        set_method_param_type(new_mtp, i, gp_reg_type);
        new_parameter_entity(frame_type, i, gp_reg_type);
    }
    set_entity_type(entity, new_mtp);
    return true;
}
void be_default_lower_va_arg(ir_node *const node, bool const compound_is_ptr,
                             unsigned const stack_param_align)
{
    ir_node  *block  = get_nodes_block(node);
    dbg_info *dbgi   = get_irn_dbg_info(node);
    ir_graph *irg    = get_irn_irg(node);
    ir_type  *aptype = get_method_res_type(get_Builtin_type(node), 0);
    ir_node *const ap       = get_irn_n(node, 1);
    ir_node *const node_mem = get_Builtin_mem(node);

    ir_mode *apmode = get_type_mode(aptype);
    ir_node *res;
    ir_node *new_mem;
    if (apmode) {
        goto load;
    } else if (compound_is_ptr) {
        apmode = mode_P;
        aptype = get_type_for_mode(apmode);
load:;
        ir_node *const load = new_rd_Load(dbgi, block, node_mem, ap, apmode,
                                          aptype, cons_none);
        res     = new_r_Proj(load, apmode, pn_Load_res);
        new_mem = new_r_Proj(load, mode_M, pn_Load_M);
    } else {
        /* aptype has no associated mode, so it is represented as a pointer. */
        res     = ap;
        new_mem = node_mem;
    }

    unsigned const round_up    = round_up2(get_type_size(aptype),
                                           stack_param_align);
    ir_mode *const offset_mode = get_reference_offset_mode(mode_P);
    ir_node *const offset      = new_r_Const_long(irg, offset_mode, round_up);
    ir_node *const new_ap      = new_rd_Add(dbgi, block, ap, offset);

    ir_node *const in[] = { new_mem, res, new_ap };
    turn_into_tuple(node, ARRAY_SIZE(in), in);
}
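/* In source terms, the lowering above implements a single va_arg step:
 * load the value the argument pointer currently points to, then advance
 * the pointer by the value's size rounded up to the stack parameter
 * alignment. A hypothetical C equivalent (f and its locals are
 * illustrative only):
 */
#include <stdarg.h>

int f(int n, ...)
{
    va_list ap;
    va_start(ap, n);
    int x = va_arg(ap, int); /* Load from ap, then ap += rounded-up size */
    va_end(ap);
    return x;
}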
calling_convention_t *sparc_decide_calling_convention(ir_type *function_type,
                                                      ir_graph *irg)
{
    bool omit_fp = false;
    if (irg != NULL) {
        omit_fp = be_options.omit_fp;
        /* our current vaarg handling needs the standard space to store the
         * args 0-5 in it */
        if (is_method_variadic(function_type))
            omit_fp = false;
        /* The pointer to the aggregate return value belongs to the 92 magic
         * bytes. Thus, if the called function increases the stack size, it
         * must copy the value to the appropriate location. This is not
         * implemented yet, so we forbid omitting the frame pointer. */
        if (get_method_calling_convention(function_type) & cc_compound_ret)
            omit_fp = false;
        if (omit_fp)
            irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
        sparc_get_irg_data(irg)->omit_fp = omit_fp;
    }

    mtp_additional_properties mtp
        = get_method_additional_properties(function_type);
    unsigned *caller_saves = rbitset_malloc(N_SPARC_REGISTERS);
    if (mtp & mtp_property_returns_twice) {
        rbitset_copy(caller_saves, default_returns_twice_saves,
                     N_SPARC_REGISTERS);
    } else {
        rbitset_copy(caller_saves, default_caller_saves, N_SPARC_REGISTERS);
    }

    /* determine how parameters are passed */
    int                 n_params = get_method_n_params(function_type);
    int                 regnum   = 0;
    reg_or_stackslot_t *params   = XMALLOCNZ(reg_or_stackslot_t, n_params);

    int      n_param_regs = ARRAY_SIZE(param_regs);
    unsigned stack_offset = !omit_fp ? SPARC_MIN_STACKSIZE : 0;
    for (int i = 0; i < n_params; ++i) {
        ir_type            *param_type = get_method_param_type(function_type, i);
        ir_mode            *mode;
        int                 bits;
        reg_or_stackslot_t *param;

        if (is_compound_type(param_type))
            panic("compound arguments not supported yet");

        mode  = get_type_mode(param_type);
        bits  = get_mode_size_bits(mode);
        param = &params[i];

        if (i == 0 &&
            (get_method_calling_convention(function_type) & cc_compound_ret)) {
            assert(mode_is_reference(mode) && bits == 32);
            /* special case: we have reserved space for this on the between
             * type */
            param->type           = param_type;
            param->offset         = SPARC_AGGREGATE_RETURN_OFFSET;
            param->already_stored = true;
            continue;
        }

        if (regnum < n_param_regs) {
            param->offset = SPARC_PARAMS_SPILL_OFFSET
                          + regnum * SPARC_REGISTER_SIZE;
            param->type   = param_type;
            arch_register_t const *reg = param_regs[regnum++];
            if (irg == NULL || omit_fp)
                reg = map_i_to_o_reg(reg);
            param->reg0 = reg;
            param->req0 = reg->single_req;
        } else {
            param->type           = param_type;
            param->offset         = stack_offset;
            param->already_stored = true;
            /* increase offset by at least SPARC_REGISTER_SIZE bytes so
             * everything is aligned */
            stack_offset += MAX(bits / 8, SPARC_REGISTER_SIZE);
            continue;
        }

        /* we might need a 2nd 32bit component (for 64bit or double values) */
        if (bits > 32) {
            if (bits > 64)
                panic("only 32 and 64bit modes supported");

            if (regnum < n_param_regs) {
                param->offset = SPARC_PARAMS_SPILL_OFFSET
                              + regnum * SPARC_REGISTER_SIZE;
                arch_register_t const *reg = param_regs[regnum++];
                if (irg == NULL || omit_fp)
                    reg = map_i_to_o_reg(reg);
                param->reg1 = reg;
                param->req1 = reg->single_req;
            } else {
                ir_mode *regmode = param_regs[0]->cls->mode;
                ir_type *type    = get_type_for_mode(regmode);
                param->type   = type;
                param->offset = stack_offset;
                assert(get_mode_size_bits(regmode) == 32);
                stack_offset += SPARC_REGISTER_SIZE;
            }
        }
    }
    unsigned n_param_regs_used = regnum;

    /* determine how results are passed */
    int      n_results           = get_method_n_ress(function_type);
    unsigned float_regnum        = 0;
    unsigned n_reg_results       = 0;
    unsigned n_float_result_regs = ARRAY_SIZE(float_result_regs);
    reg_or_stackslot_t *results  = XMALLOCNZ(reg_or_stackslot_t, n_results);
    regnum = 0;
    for (int i = 0; i < n_results; ++i) {
        ir_type            *result_type = get_method_res_type(function_type, i);
        ir_mode            *result_mode = get_type_mode(result_type);
        reg_or_stackslot_t *result      = &results[i];

        if (mode_is_float(result_mode)) {
            unsigned n_regs   = determine_n_float_regs(result_mode);
            unsigned next_reg = round_up2(float_regnum, n_regs);

            if (next_reg >= n_float_result_regs) {
                panic("too many float results");
            } else {
                const arch_register_t *reg = float_result_regs[next_reg];
                rbitset_clear(caller_saves, reg->global_index);
                if (n_regs == 1) {
                    result->req0 = reg->single_req;
                } else if (n_regs == 2) {
                    result->req0 = &float_result_reqs_double[next_reg];
                    rbitset_clear(caller_saves, reg->global_index+1);
                } else if (n_regs == 4) {
                    result->req0 = &float_result_reqs_quad[next_reg];
                    rbitset_clear(caller_saves, reg->global_index+1);
                    rbitset_clear(caller_saves, reg->global_index+2);
                    rbitset_clear(caller_saves, reg->global_index+3);
                } else {
                    panic("invalid number of registers in result");
                }
                float_regnum = next_reg + n_regs;
                ++n_reg_results;
            }
        } else {
            if (get_mode_size_bits(result_mode) > 32)
                panic("results with more than 32bits not supported yet");

            if (regnum >= n_param_regs) {
                panic("too many results");
            } else {
                const arch_register_t *reg = param_regs[regnum++];
                if (irg == NULL || omit_fp)
                    reg = map_i_to_o_reg(reg);
                result->req0 = reg->single_req;
                rbitset_clear(caller_saves, reg->global_index);
                ++n_reg_results;
            }
        }
    }

    calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
    cconv->n_parameters     = n_params;
    cconv->parameters       = params;
    cconv->param_stack_size = stack_offset - SPARC_MIN_STACKSIZE;
    cconv->n_param_regs     = n_param_regs_used;
    cconv->results          = results;
    cconv->omit_fp          = omit_fp;
    cconv->caller_saves     = caller_saves;
    cconv->n_reg_results    = n_reg_results;

    /* setup ignore register array */
    if (irg != NULL) {
        be_irg_t *birg = be_birg_from_irg(irg);
        birg->allocatable_regs
            = be_cconv_alloc_all_regs(&birg->obst, N_SPARC_REGISTERS);
        be_cconv_rem_regs(birg->allocatable_regs, ignore_regs,
                          ARRAY_SIZE(ignore_regs));
    }
    return cconv;
}
calling_convention_t *arm_decide_calling_convention(const ir_graph *irg,
                                                    ir_type *function_type)
{
    /* determine how parameters are passed */
    unsigned            stack_offset = 0;
    size_t const        n_param_regs = ARRAY_SIZE(param_regs);
    size_t const        n_params     = get_method_n_params(function_type);
    size_t              regnum       = 0;
    reg_or_stackslot_t *params       = XMALLOCNZ(reg_or_stackslot_t, n_params);
    for (size_t i = 0; i < n_params; ++i) {
        ir_type            *param_type = get_method_param_type(function_type, i);
        ir_mode            *mode       = get_type_mode(param_type);
        int                 bits       = get_mode_size_bits(mode);
        reg_or_stackslot_t *param      = &params[i];
        param->type = param_type;

        /* doubleword modes need to be passed in even registers */
        if (param_type->flags & tf_lowered_dw) {
            if (regnum < n_param_regs) {
                if ((regnum & 1) != 0)
                    ++regnum;
            } else {
                unsigned misalign = stack_offset % 8;
                if (misalign > 0)
                    stack_offset += 8 - misalign;
            }
        }

        if (regnum < n_param_regs) {
            param->reg0 = param_regs[regnum++];
        } else {
            param->offset = stack_offset;
            /* increase offset by at least 4 bytes so everything is aligned */
            stack_offset += MAX(bits / 8, 4);
            continue;
        }

        /* we might need a 2nd 32bit component (for 64bit or double values) */
        if (bits > 32) {
            if (bits > 64)
                panic("only 32 and 64bit modes supported");

            if (regnum < n_param_regs) {
                const arch_register_t *reg = param_regs[regnum++];
                param->reg1 = reg;
            } else {
                ir_mode *pmode = param_regs[0]->cls->mode;
                ir_type *type  = get_type_for_mode(pmode);
                param->type   = type;
                param->offset = stack_offset;
                assert(get_mode_size_bits(pmode) == 32);
                stack_offset += 4;
            }
        }
    }
    unsigned const n_param_regs_used = regnum;

    size_t const n_result_regs       = ARRAY_SIZE(result_regs);
    size_t const n_float_result_regs = ARRAY_SIZE(float_result_regs);
    size_t       n_results           = get_method_n_ress(function_type);
    size_t       float_regnum        = 0;
    reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
    regnum = 0;
    for (size_t i = 0; i < n_results; ++i) {
        ir_type            *result_type = get_method_res_type(function_type, i);
        ir_mode            *result_mode = get_type_mode(result_type);
        reg_or_stackslot_t *result      = &results[i];
        if (mode_is_float(result_mode)) {
            if (float_regnum >= n_float_result_regs) {
                panic("too many float results");
            } else {
                const arch_register_t *reg = float_result_regs[float_regnum++];
                result->reg0 = reg;
            }
        } else {
            if (get_mode_size_bits(result_mode) > 32)
                panic("results with more than 32bits not supported yet");

            if (regnum >= n_result_regs) {
                panic("too many results");
            } else {
                const arch_register_t *reg = result_regs[regnum++];
                result->reg0 = reg;
            }
        }
    }

    calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
    cconv->parameters       = params;
    cconv->n_parameters     = n_params;
    cconv->param_stack_size = stack_offset;
    cconv->n_param_regs     = n_param_regs_used;
    cconv->results          = results;

    /* setup allocatable registers */
    if (irg != NULL) {
        be_irg_t *birg = be_birg_from_irg(irg);
        assert(birg->allocatable_regs == NULL);
        birg->allocatable_regs
            = be_cconv_alloc_all_regs(&birg->obst, N_ARM_REGISTERS);
        be_cconv_rem_regs(birg->allocatable_regs, ignore_regs,
                          ARRAY_SIZE(ignore_regs));
        arm_get_irg_data(irg)->omit_fp = true;
    }
    return cconv;
}
/**
 * @return The lowered (floating-point) type.
 */
static ir_type *lower_type(ir_type *tp)
{
    ir_mode *mode         = get_type_mode(tp);
    ir_mode *lowered_mode = get_lowered_mode(mode);
    return get_type_for_mode(lowered_mode);
}