/**
 * Lower a single CopyB node, dispatching on the copy size:
 * small copies become Load/Store sequences, large copies become
 * memcpy calls.  Medium sizes must have been filtered out earlier.
 */
static void lower_copyb_node(ir_node *irn)
{
	ir_type  *const type = get_CopyB_type(irn);
	unsigned  const size = get_type_size_bytes(type);

	if (size <= max_small_size) {
		lower_small_copyb_node(irn);
	} else if (size >= min_large_size) {
		lower_large_copyb_node(irn);
	} else {
		/* find_copyb_nodes() must never queue a medium-sized CopyB */
		panic("CopyB of invalid size");
	}
}
/**
 * Insert the function epilog in front of a Return node: release the
 * stack frame by emitting an IncSP that undoes the prolog's frame
 * allocation, and reroute the Return's stack-pointer input through it.
 */
static void introduce_epilog(ir_node *ret)
{
	arch_register_t const *const sp_reg = &arm_registers[REG_SP];
	assert(arch_get_irn_register_req_in(ret, n_arm_Return_sp) == sp_reg->single_req);

	ir_node  *const sp         = get_irn_n(ret, n_arm_Return_sp);
	ir_node  *const block      = get_nodes_block(ret);
	ir_graph *const irg        = get_irn_irg(ret);
	ir_type  *const frame_type = get_irg_frame_type(irg);
	unsigned  const frame_size = get_type_size_bytes(frame_type);

	/* Negate through int explicitly: "-frame_size" on an unsigned wraps
	 * modulo 2^N and then relies on implementation-defined conversion when
	 * passed to the signed offset parameter; -(int)frame_size states the
	 * intended negative frame release directly. */
	ir_node *const incsp = be_new_IncSP(sp_reg, block, sp, -(int)frame_size, 0);
	set_irn_n(ret, n_arm_Return_sp, incsp);
	sched_add_before(ret, incsp);
}
/**
 * Default implementation of heap array allocation: calloc()s space for the
 * elements plus a leading length counter, stores the element count just
 * before the element data, and returns a pointer to the first element.
 *
 * @param eltype  element type of the array
 * @param count   IR node holding the requested element count
 * @param irg     graph the new nodes belong to
 * @param block   block to place the new nodes in
 * @param mem     in/out memory state; updated past the call and the store
 * @return        pointer node to the first array element (past the counter)
 */
ir_node *dmemory_default_alloc_array(ir_type *eltype, ir_node *count, ir_graph *irg, ir_node *block, ir_node **mem)
{
	ir_node *cur_mem = *mem;
	/* byte size of the length counter that is stored in front of the data */
	unsigned count_size = get_mode_size_bytes(default_arraylength_mode);
	/* class-typed elements are stored as references (pointers) */
	unsigned element_size = is_Class_type(eltype) ? get_mode_size_bytes(mode_P) : get_type_size_bytes(eltype); // FIXME: some langs support arrays of structs.
	/* increase element count so we have enough space for a counter at the front */
	/* NOTE(review): this computes ceil(element_size / count_size), i.e. the
	 * padding in *elements* only when element_size >= count_size.  For
	 * element_size < count_size (e.g. byte arrays with a 4-byte counter) the
	 * result looks too small to hold the counter — expected would be
	 * ceil(count_size / element_size).  Verify against callers/tests. */
	unsigned add_size = (element_size + (count_size-1)) / count_size;
	/* total allocation size = (count + add_size) * element_size, in mode_Iu */
	ir_node *count_u = new_r_Conv(block, count, mode_Iu);
	ir_node *addv = new_r_Const_long(irg, mode_Iu, add_size);
	ir_node *add1 = new_r_Add(block, count_u, addv, mode_Iu);
	ir_node *elsizev = new_r_Const_long(irg, mode_Iu, element_size);
	ir_node *size = new_r_Mul(block, add1, elsizev, mode_Iu);
	/* byte distance between the calloc result and the first real element */
	unsigned addr_delta = add_size * element_size;
	/* build a call to calloc(1, size); calloc zero-initializes the memory */
	symconst_symbol calloc_sym;
	calloc_sym.entity_p = calloc_entity;
	ir_node *callee = new_r_SymConst(irg, mode_P, calloc_sym, symconst_addr_ent);
	ir_node *one = new_r_Const_long(irg, mode_Iu, 1);
	ir_node *in[2] = { one, size };
	ir_type *call_type = get_entity_type(calloc_entity);
	ir_node *call = new_r_Call(block, cur_mem, callee, 2, in, call_type);
	cur_mem = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node *ress = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node *res = new_r_Proj(ress, mode_P, 0);
	/* write length of array */
	ir_node *len_value = new_r_Conv(block, count, default_arraylength_mode);
	/* NOTE(review): the "-4" assumes a 4-byte counter placed immediately
	 * before the element data; count_size would be the non-magic choice. */
	ir_node *len_delta = new_r_Const_long(irg, mode_P, (int)addr_delta-4); //FIXME: replace magic num
	ir_node *len_addr = new_r_Add(block, res, len_delta, mode_P);
	ir_node *store = new_r_Store(block, cur_mem, len_addr, len_value, cons_none);
	cur_mem = new_r_Proj(store, mode_M, pn_Store_M);
	/* advance the result pointer past the counter/padding area */
	if (addr_delta > 0) {
		ir_node *delta = new_r_Const_long(irg, mode_P, (int)addr_delta);
		res = new_r_Add(block, res, delta, mode_P);
	}
	*mem = cur_mem;
	return res;
}
/**
 * Post-Walker: collect CopyB nodes that need lowering.
 * Small and large CopyBs are queued in the walk environment; medium-sized
 * ones and CopyBs of types without a fixed layout are skipped.
 */
static void find_copyb_nodes(ir_node *irn, void *ctx)
{
	if (!is_CopyB(irn))
		return;

	ir_type *type = get_CopyB_type(irn);
	if (get_type_state(type) != layout_fixed)
		return;

	unsigned const size = get_type_size_bytes(type);
	/* Nothing to do for medium-sized CopyBs. */
	if (max_small_size < size && size < min_large_size)
		return;

	/* Okay, either small or large CopyB, so link it in and lower it later. */
	walk_env_t *walk_env = (walk_env_t*)ctx;
	ARR_APP1(ir_node*, walk_env->copybs, irn);
}
/**
 * Turn a small CopyB node into a series of Load/Store pairs.
 * Copies are emitted with the widest usable chunk first; the chunk width
 * is halved whenever the remaining tail is smaller than the chunk.
 */
static void lower_small_copyb_node(ir_node *irn)
{
	ir_graph *irg      = get_irn_irg(irn);
	ir_node  *block    = get_nodes_block(irn);
	ir_type  *tp       = get_CopyB_type(irn);
	ir_node  *addr_src = get_CopyB_src(irn);
	ir_node  *addr_dst = get_CopyB_dst(irn);
	ir_node  *mem      = get_CopyB_mem(irn);
	ir_mode  *mode_ref = get_irn_mode(addr_src);
	/* if misaligned accesses are allowed use the native word width,
	 * otherwise never exceed the type's alignment */
	unsigned  chunk    = allow_misalignments ? native_mode_bytes
	                                         : get_type_alignment_bytes(tp);
	unsigned  size     = get_type_size_bytes(tp);
	unsigned  pos      = 0;

	while (pos < size) {
		ir_mode *mode = get_ir_mode(chunk);
		/* emit as many chunk-sized copies as still fit */
		for (; pos + chunk <= size; pos += chunk) {
			ir_mode *mode_ref_int = get_reference_offset_mode(mode_ref);

			/* load from src + pos */
			ir_node *src_off  = new_r_Const_long(irg, mode_ref_int, pos);
			ir_node *src_addr = new_r_Add(block, addr_src, src_off, mode_ref);
			ir_node *load     = new_r_Load(block, mem, src_addr, mode, tp, cons_none);
			ir_node *load_res = new_r_Proj(load, mode, pn_Load_res);
			ir_node *load_mem = new_r_Proj(load, mode_M, pn_Load_M);

			/* store to dst + pos, chained through the load's memory */
			ir_node *dst_off  = new_r_Const_long(irg, mode_ref_int, pos);
			ir_node *dst_addr = new_r_Add(block, addr_dst, dst_off, mode_ref);
			ir_node *store    = new_r_Store(block, load_mem, dst_addr, load_res, tp, cons_none);
			mem = new_r_Proj(store, mode_M, pn_Store_M);
		}
		/* tail remaining: retry with half the chunk width */
		chunk /= 2;
	}

	/* the CopyB is fully replaced by the final memory state */
	exchange(irn, mem);
}
/**
 * Turn a large CopyB node into a call to memcpy(dst, src, size).
 * The CopyB is replaced by the call's memory result.
 */
static void lower_large_copyb_node(ir_node *irn)
{
	ir_graph *irg   = get_irn_irg(irn);
	ir_node  *block = get_nodes_block(irn);
	dbg_info *dbgi  = get_irn_dbg_info(irn);

	ir_node  *mem  = get_CopyB_mem(irn);
	ir_node  *src  = get_CopyB_src(irn);
	ir_node  *dst  = get_CopyB_dst(irn);
	unsigned  size = get_type_size_bytes(get_CopyB_type(irn));

	ir_node *callee      = get_memcpy_address(irg);
	ir_type *call_tp     = get_memcpy_methodtype();
	ir_mode *mode_size_t = get_ir_mode(native_mode_bytes);
	ir_node *size_cnst   = new_r_Const_long(irg, mode_size_t, size);

	/* memcpy argument order: destination, source, byte count */
	ir_node *in[]     = { dst, src, size_cnst };
	ir_node *call     = new_rd_Call(dbgi, block, mem, callee, ARRAY_SIZE(in), in, call_tp);
	ir_node *call_mem = new_r_Proj(call, mode_M, pn_Call_M);

	exchange(irn, call_mem);
}
/**
 * Decide the ia32 calling convention for a method type: assign parameters
 * to registers or stack slots, assign results to registers, and record
 * caller/callee-saved and allocatable register sets.
 *
 * @param function_type  the method type of the function/call
 * @param irg            graph of the function, or NULL for an external call
 * @return               freshly allocated calling-convention description
 *                       (parameters/results arrays owned by the result)
 */
x86_cconv_t *ia32_decide_calling_convention(ir_type *function_type, ir_graph *irg)
{
	/* frame pointer may be omitted if the option allows it and no node in
	 * the graph requires one (check_omit_fp clears the flag otherwise) */
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
	}

	mtp_additional_properties mtp = get_method_additional_properties(function_type);
	(void)mtp;
	/* TODO: do something with cc_reg_param/cc_this_call */

	unsigned *caller_saves = rbitset_malloc(N_IA32_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_IA32_REGISTERS);
	rbitset_copy(caller_saves, default_caller_saves, N_IA32_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_IA32_REGISTERS);

	/* determine how parameters are passed */
	unsigned            n_params           = get_method_n_params(function_type);
	unsigned            param_regnum       = 0;
	unsigned            float_param_regnum = 0;
	reg_or_stackslot_t *params             = XMALLOCNZ(reg_or_stackslot_t, n_params);
	unsigned            n_param_regs       = ARRAY_SIZE(default_param_regs);
	unsigned            n_float_param_regs = ARRAY_SIZE(float_param_regs);
	unsigned            stack_offset       = 0;
	for (unsigned i = 0; i < n_params; ++i) {
		ir_type *param_type = get_method_param_type(function_type, i);
		/* BUGFIX: was the HTML-mangled token "¶ms[i]" (&para + ms[i]);
		 * restored to taking the address of the i-th params slot. */
		reg_or_stackslot_t *param = &params[i];
		if (is_aggregate_type(param_type)) {
			/* aggregates are always passed on the stack */
			param->type   = param_type;
			param->offset = stack_offset;
			stack_offset += get_type_size_bytes(param_type);
			goto align_stack;
		}

		ir_mode *mode = get_type_mode(param_type);
		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs) {
			param->reg = float_param_regs[float_param_regnum++];
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = default_param_regs[param_regnum++];
		} else {
			/* out of registers: pass on the stack */
			param->type   = param_type;
			param->offset = stack_offset;
			stack_offset += get_type_size_bytes(param_type);
align_stack:;
			/* increase offset by at least IA32_REGISTER_SIZE bytes so
			 * everything is aligned */
			unsigned misalign = stack_offset % IA32_REGISTER_SIZE;
			if (misalign > 0)
				stack_offset += IA32_REGISTER_SIZE - misalign;
		}
	}
	unsigned n_param_regs_used = param_regnum + float_param_regnum;

	/* determine how results are passed */
	unsigned            n_results           = get_method_n_ress(function_type);
	unsigned            n_reg_results       = 0;
	reg_or_stackslot_t *results             = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned            res_regnum          = 0;
	unsigned            res_float_regnum    = 0;
	unsigned            n_result_regs       = ARRAY_SIZE(result_regs);
	unsigned            n_float_result_regs = ARRAY_SIZE(float_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type *result_type = get_method_res_type(function_type, i);
		ir_mode *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result = &results[i];

		const arch_register_t *reg;
		if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating points results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		/* a register used for a result is not caller-saved across the call */
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	calling_convention cc = get_method_calling_convention(function_type);

	x86_cconv_t *cconv    = XMALLOCZ(x86_cconv_t);
	/* callee pops the hidden compound-return pointer unless passed in a reg */
	cconv->sp_delta       = (cc & cc_compound_ret) && !(cc & cc_reg_param)
	                        ? IA32_REGISTER_SIZE : 0;
	cconv->parameters     = params;
	cconv->n_parameters   = n_params;
	cconv->callframe_size = stack_offset;
	cconv->n_param_regs   = n_param_regs_used;
	cconv->n_xmm_regs     = float_param_regnum;
	cconv->results        = results;
	cconv->omit_fp        = omit_fp;
	cconv->caller_saves   = caller_saves;
	cconv->callee_saves   = callee_saves;
	cconv->n_reg_results  = n_reg_results;

	if (irg != NULL) {
		be_irg_t       *birg      = be_birg_from_irg(irg);
		size_t          n_ignores = ARRAY_SIZE(ignore_regs);
		struct obstack *obst      = &birg->obst;

		birg->allocatable_regs = rbitset_obstack_alloc(obst, N_IA32_REGISTERS);
		rbitset_set_all(birg->allocatable_regs, N_IA32_REGISTERS);
		for (size_t r = 0; r < n_ignores; ++r) {
			rbitset_clear(birg->allocatable_regs, ignore_regs[r]);
		}
		/* EBP is reserved as frame pointer unless omitted */
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_EBP);
	}

	return cconv;
}
/**
 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
 */
static void lower_sel(ir_node *sel)
{
	ir_graph  *irg   = get_irn_irg(sel);
	ir_entity *ent   = get_Sel_entity(sel);
	ir_type   *owner = get_entity_owner(ent);
	dbg_info  *dbg   = get_irn_dbg_info(sel);
	ir_mode   *mode  = get_irn_mode(sel);
	ir_node   *bl    = get_nodes_block(sel);
	ir_node   *newn;

	/* we can only replace Sels when the layout of the owner type is decided. */
	if (get_type_state(owner) != layout_fixed)
		return;

	if (0 < get_Sel_n_indexs(sel)) {
		/* an Array access */
		ir_type *basetyp = get_entity_type(ent);
		ir_mode *basemode;
		ir_node *index;

		/* non-primitive elements are addressed through data pointers */
		if (is_Primitive_type(basetyp))
			basemode = get_type_mode(basetyp);
		else
			basemode = mode_P_data;

		assert(basemode && "no mode for lowering Sel");
		assert((get_mode_size_bits(basemode) % 8 == 0) && "can not deal with unorthodox modes");
		index = get_Sel_index(sel, 0);

		if (is_Array_type(owner)) {
			ir_type *arr_ty = owner;
			size_t   dims = get_array_n_dimensions(arr_ty);
			size_t  *map  = ALLOCAN(size_t, dims);
			ir_mode *mode_Int = get_reference_mode_signed_eq(mode);
			ir_tarval *tv;
			ir_node *last_size;
			size_t   i;

			assert(dims == (size_t)get_Sel_n_indexs(sel) && "array dimension must match number of indices of Sel node");

			/* map memory-layout order of each dimension back to its index
			 * position, so dimensions can be processed innermost-first */
			for (i = 0; i < dims; i++) {
				size_t order = get_array_order(arr_ty, i);
				assert(order < dims && "order of a dimension must be smaller than the arrays dim");
				map[order] = i;
			}
			newn = get_Sel_ptr(sel);

			/* Size of the array element */
			tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
			last_size = new_rd_Const(dbg, irg, tv);

			/*
			 * We compute the offset part of dimension d_i recursively
			 * with the offset part of dimension d_{i-1}
			 *
			 *     off_0 = sizeof(array_element_type);
			 *     off_i = (u_i - l_i) * off_{i-1}  ; i >= 1
			 *
			 * whereas u_i is the upper bound of the current dimension
			 * and l_i the lower bound of the current dimension.
			 */
			for (i = dims; i > 0;) {
				size_t dim = map[--i];
				ir_node *lb, *ub, *elms, *n, *ind;

				elms = NULL;
				lb = get_array_lower_bound(arr_ty, dim);
				ub = get_array_upper_bound(arr_ty, dim);

				/* materialize the bounds in the block (as signed ints),
				 * or NULL when the bound is unknown */
				if (! is_Unknown(lb))
					lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
				else
					lb = NULL;

				if (! is_Unknown(ub))
					ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
				else
					ub = NULL;

				/*
				 * If the array has more than one dimension, lower and upper
				 * bounds have to be set in the non-last dimension.
				 */
				if (i > 0) {
					assert(lb != NULL && "lower bound has to be set in multi-dim array");
					assert(ub != NULL && "upper bound has to be set in multi-dim array");

					/* Elements in one Dimension */
					elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
				}

				ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);

				/*
				 * Normalize index; if lower bound is set, also assume
				 * lower bound == 0
				 */
				if (lb != NULL)
					ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);

				n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);

				/*
				 * see comment above.
				 */
				if (i > 0)
					last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);

				newn = new_rd_Add(dbg, bl, newn, n, mode);
			}
		} else {
			/* no array type: address = ptr + index * sizeof(element) */
			ir_mode   *idx_mode = get_irn_mode(index);
			ir_tarval *tv       = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);

			newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
			                  new_rd_Mul(dbg, bl, index,
			                             new_r_Const(irg, tv),
			                             idx_mode),
			                  mode);
		}
	} else if (is_Method_type(get_entity_type(ent)) && is_Class_type(owner)) {
		/* We need an additional load when accessing methods from a dispatch
		 * table.
		 * Matze TODO: Is this really still used? At least liboo does its own
		 * lowering of Method-Sels...
		 */
		ir_mode   *ent_mode = get_type_mode(get_entity_type(ent));
		int        offset   = get_entity_offset(ent);
		ir_mode   *mode_Int = get_reference_mode_signed_eq(mode);
		ir_tarval *tv       = new_tarval_from_long(offset, mode_Int);
		ir_node   *cnst     = new_rd_Const(dbg, irg, tv);
		ir_node   *add      = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
		ir_node   *mem      = get_Sel_mem(sel);
		newn = new_rd_Load(dbg, bl, mem, add, ent_mode, cons_none);
		newn = new_r_Proj(newn, ent_mode, pn_Load_res);
	} else {
		int offset = get_entity_offset(ent);

		/* replace Sel by add(obj, const(ent.offset)) */
		newn = get_Sel_ptr(sel);
		if (offset != 0) {
			ir_mode   *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_tarval *tv        = new_tarval_from_long(offset, mode_UInt);
			ir_node   *cnst      = new_r_Const(irg, tv);
			newn = new_rd_Add(dbg, bl, newn, cnst, mode);
		}
	}

	/* run the hooks */
	hook_lower(sel);
	exchange(sel, newn);
}
/**
 * Lower all possible SymConst nodes: type sizes/alignments, entity offsets
 * and enumeration constants become Const nodes; address entities are left
 * untouched.
 */
static void lower_symconst(ir_node *symc)
{
	ir_graph *irg  = get_irn_irg(symc);
	ir_node  *newn = NULL;

	switch (get_SymConst_kind(symc)) {
	case symconst_type_size: {
		/* rewrite the SymConst node by a Const node */
		ir_type *tp = get_SymConst_type(symc);
		assert(get_type_state(tp) == layout_fixed);
		newn = new_r_Const_long(irg, get_irn_mode(symc), get_type_size_bytes(tp));
		break;
	}
	case symconst_type_align: {
		/* rewrite the SymConst node by a Const node */
		ir_type *tp = get_SymConst_type(symc);
		assert(get_type_state(tp) == layout_fixed);
		newn = new_r_Const_long(irg, get_irn_mode(symc), get_type_alignment_bytes(tp));
		break;
	}
	case symconst_addr_ent:
		/* leave */
		return;
	case symconst_ofs_ent: {
		/* rewrite the SymConst node by a Const node */
		ir_entity *ent = get_SymConst_entity(symc);
		assert(get_type_state(get_entity_type(ent)) == layout_fixed);
		newn = new_r_Const_long(irg, get_irn_mode(symc), get_entity_offset(ent));
		break;
	}
	case symconst_enum_const: {
		/* rewrite the SymConst node by a Const node */
		ir_enum_const *ec = get_SymConst_enum(symc);
		assert(get_type_state(get_enumeration_owner(ec)) == layout_fixed);
		newn = new_r_Const(irg, get_enumeration_value(ec));
		break;
	}
	default:
		assert(!"unknown SymConst kind");
		return;
	}

	assert(newn);
	/* run the hooks */
	hook_lower(symc);
	exchange(symc, newn);
}
void init_predefined_types(void) { static const type_base_t error = { TYPE_ERROR, TYPE_QUALIFIER_NONE, NULL }; type_error_type = (type_t*)&error; type_bool = make_atomic_type(ATOMIC_TYPE_BOOL, TYPE_QUALIFIER_NONE); type_signed_char = make_atomic_type(ATOMIC_TYPE_SCHAR, TYPE_QUALIFIER_NONE); type_unsigned_char = make_atomic_type(ATOMIC_TYPE_UCHAR, TYPE_QUALIFIER_NONE); type_short = make_atomic_type(ATOMIC_TYPE_SHORT, TYPE_QUALIFIER_NONE); type_unsigned_short = make_atomic_type(ATOMIC_TYPE_USHORT, TYPE_QUALIFIER_NONE); type_int = make_atomic_type(ATOMIC_TYPE_INT, TYPE_QUALIFIER_NONE); type_unsigned_int = make_atomic_type(ATOMIC_TYPE_UINT, TYPE_QUALIFIER_NONE); type_long = make_atomic_type(ATOMIC_TYPE_LONG, TYPE_QUALIFIER_NONE); type_unsigned_long = make_atomic_type(ATOMIC_TYPE_ULONG, TYPE_QUALIFIER_NONE); type_long_long = make_atomic_type(ATOMIC_TYPE_LONGLONG, TYPE_QUALIFIER_NONE); type_unsigned_long_long = make_atomic_type(ATOMIC_TYPE_ULONGLONG, TYPE_QUALIFIER_NONE); type_long_double = make_atomic_type(ATOMIC_TYPE_LONG_DOUBLE, TYPE_QUALIFIER_NONE); type_double = make_atomic_type(ATOMIC_TYPE_DOUBLE, TYPE_QUALIFIER_NONE); type_float = make_atomic_type(ATOMIC_TYPE_FLOAT, TYPE_QUALIFIER_NONE); type_char = make_atomic_type(ATOMIC_TYPE_CHAR, TYPE_QUALIFIER_NONE); type_void = make_void_type(TYPE_QUALIFIER_NONE); type_const_void = make_void_type(TYPE_QUALIFIER_CONST); type_builtin_template = allocate_type_zero(TYPE_BUILTIN_TEMPLATE); type_builtin_template = identify_new_type(type_builtin_template); int8_type_kind = find_signed_int_atomic_type_kind_for_size(1); int16_type_kind = find_signed_int_atomic_type_kind_for_size(2); int32_type_kind = find_signed_int_atomic_type_kind_for_size(4); int64_type_kind = find_signed_int_atomic_type_kind_for_size(8); type_int32_t = make_atomic_type(int32_type_kind, TYPE_QUALIFIER_NONE); type_int64_t = make_atomic_type(int64_type_kind, TYPE_QUALIFIER_NONE); /* pointer types */ type_void_ptr = make_pointer_type(type_void, TYPE_QUALIFIER_NONE); 
type_const_void_ptr = make_pointer_type(type_const_void, TYPE_QUALIFIER_NONE); type_void_ptr_restrict = make_pointer_type(type_void, TYPE_QUALIFIER_RESTRICT); type_const_void_ptr_restrict = make_pointer_type(type_const_void, TYPE_QUALIFIER_RESTRICT); type_char_ptr = make_pointer_type(type_char, TYPE_QUALIFIER_NONE); type_char_ptr_restrict = make_pointer_type(type_char, TYPE_QUALIFIER_RESTRICT); type_signed_char_ptr = make_pointer_type(type_signed_char, TYPE_QUALIFIER_NONE); type_short_ptr = make_pointer_type(type_short, TYPE_QUALIFIER_NONE); type_int_ptr = make_pointer_type(type_int, TYPE_QUALIFIER_NONE); type_long_ptr = make_pointer_type(type_long, TYPE_QUALIFIER_NONE); type_unsigned_char_ptr = make_pointer_type(type_unsigned_char, TYPE_QUALIFIER_NONE); type_unsigned_short_ptr = make_pointer_type(type_unsigned_short, TYPE_QUALIFIER_NONE); type_unsigned_int_ptr = make_pointer_type(type_unsigned_int, TYPE_QUALIFIER_NONE); type_unsigned_long_ptr = make_pointer_type(type_unsigned_long, TYPE_QUALIFIER_NONE); type_unsigned_long_long_ptr = make_pointer_type(type_unsigned_long, TYPE_QUALIFIER_NONE); type_long_long_ptr = make_pointer_type(type_long_long, TYPE_QUALIFIER_NONE); type_long_double_ptr = make_pointer_type(type_long_double, TYPE_QUALIFIER_NONE); type_double_ptr = make_pointer_type(type_double, TYPE_QUALIFIER_NONE); type_float_ptr = make_pointer_type(type_float, TYPE_QUALIFIER_NONE); type_char_ptr_ptr = make_pointer_type(type_char_ptr, TYPE_QUALIFIER_NONE); type_builtin_template_ptr = make_pointer_type(type_builtin_template, TYPE_QUALIFIER_NONE); backend_params const *const be_params = be_get_backend_param(); ir_type *be_va_list_type = be_params->vararg.va_list_type; if (!be_va_list_type) { /* Backend has no vararg support. Just hope the the program will not be * using any. If it does, the parse_va_* functions will complain. 
*/ type_valist = type_error_type; type_valist_arg = type_error_type; } else if (is_Pointer_type(be_va_list_type)) { type_valist = type_void_ptr; type_valist_arg = type_void_ptr; } else if (is_Struct_type(be_va_list_type)) { entity_t *ent = allocate_entity_zero(ENTITY_STRUCT, NAMESPACE_NORMAL, sym_anonymous, &builtin_position); ent->compound.alignment = get_type_alignment_bytes(be_va_list_type); ent->compound.size = get_type_size_bytes(be_va_list_type); ent->compound.complete = true; ent->compound.members = (scope_t){ .first_entity = NULL, .last_entity = NULL, .depth = 0 }; type_t *type_valist_struct = allocate_type_zero(TYPE_COMPOUND_STRUCT); type_valist_struct->base.firm_type = be_va_list_type; type_valist_struct->compound.compound = &ent->compound; type_valist = make_array_type(type_valist_struct, 1, TYPE_QUALIFIER_NONE); type_valist_arg = automatic_type_conversion(type_valist); } /* const character types */ type_const_char = make_atomic_type(ATOMIC_TYPE_CHAR, TYPE_QUALIFIER_CONST); type_const_char_ptr = make_pointer_type(type_const_char, TYPE_QUALIFIER_NONE); type_const_char_ptr_restrict = make_pointer_type(type_const_char, TYPE_QUALIFIER_RESTRICT); atomic_type_kind_t pointer_sized_int = dialect.pointer_sized_int; atomic_type_kind_t pointer_sized_uint = dialect.pointer_sized_uint; type_size_t = make_atomic_type(pointer_sized_uint, TYPE_QUALIFIER_NONE); type_ssize_t = make_atomic_type(pointer_sized_int, TYPE_QUALIFIER_NONE); type_uptrdiff_t = type_size_t; type_ptrdiff_t = type_ssize_t; type_intmax_t = type_long_long; type_uintmax_t = type_unsigned_long_long; type_wint_t = type_unsigned_int; type_intmax_t_ptr = make_pointer_type(type_intmax_t, TYPE_QUALIFIER_NONE); type_uintmax_t_ptr = make_pointer_type(type_uintmax_t, TYPE_QUALIFIER_NONE); type_ptrdiff_t_ptr = make_pointer_type(type_ptrdiff_t, TYPE_QUALIFIER_NONE); type_uptrdiff_t_ptr = make_pointer_type(type_uptrdiff_t, TYPE_QUALIFIER_NONE); type_ssize_t_ptr = make_pointer_type(type_ssize_t, 
TYPE_QUALIFIER_NONE); type_size_t_ptr = make_pointer_type(type_size_t, TYPE_QUALIFIER_NONE); atomic_type_kind_t akind = dialect.cpp ? ATOMIC_TYPE_WCHAR_T : dialect.wchar_atomic_kind; type_wchar_t = make_atomic_type(akind, TYPE_QUALIFIER_NONE); type_const_wchar_t = make_atomic_type(akind, TYPE_QUALIFIER_CONST); type_wchar_t_ptr = make_pointer_type(type_wchar_t, TYPE_QUALIFIER_NONE); type_const_wchar_t_ptr = make_pointer_type(type_const_wchar_t, TYPE_QUALIFIER_NONE); atomic_type_kind_t const u2 = find_unsigned_int_atomic_type_kind_for_size(2); type_char16_t = make_atomic_type(u2, TYPE_QUALIFIER_NONE); type_char16_t_const = make_atomic_type(u2, TYPE_QUALIFIER_CONST); type_char16_t_ptr = make_pointer_type(type_char16_t, TYPE_QUALIFIER_NONE); type_char16_t_const_ptr = make_pointer_type(type_char16_t_const, TYPE_QUALIFIER_NONE); atomic_type_kind_t const u4 = find_unsigned_int_atomic_type_kind_for_size(4); type_char32_t = make_atomic_type(u4, TYPE_QUALIFIER_NONE); type_char32_t_const = make_atomic_type(u4, TYPE_QUALIFIER_CONST); type_char32_t_ptr = make_pointer_type(type_char32_t, TYPE_QUALIFIER_NONE); type_char32_t_const_ptr = make_pointer_type(type_char32_t_const, TYPE_QUALIFIER_NONE); if (dialect.ms) init_ms_types(); }
/* NOTE(review): stray closing brace — the preceding function already appears
 * brace-balanced; verify brace balance in the original file. */
}

/**
 * Insert stack-frame prolog and epilog code: attach an epilog (frame
 * release) before every Return node and an IncSP that allocates the frame
 * right after the Start node.
 */
static void introduce_prolog_epilog(ir_graph *irg)
{
	/* introduce epilog for every return node */
	foreach_irn_in(get_irg_end_block(irg), i, ret) {
		assert(is_arm_Return(ret));
		introduce_epilog(ret);
	}

	const arch_register_t *sp_reg     = &arm_registers[REG_SP];
	ir_node               *start      = get_irg_start(irg);
	ir_node               *block      = get_nodes_block(start);
	ir_node               *initial_sp = be_get_Start_proj(irg, sp_reg);
	ir_type               *frame_type = get_irg_frame_type(irg);
	unsigned               frame_size = get_type_size_bytes(frame_type);

	/* allocate the frame and make all former users of the initial stack
	 * pointer use the adjusted one instead */
	ir_node *const incsp = be_new_IncSP(sp_reg, block, initial_sp, frame_size, 0);
	edges_reroute_except(initial_sp, incsp, incsp);
	sched_add_after(start, incsp);
}

/**
 * Return the index of the first register in the should-be-same constraint
 * bitmask of @p req. Panics if the mask is empty.
 */
static int get_first_same(const arch_register_req_t* req)
{
	const unsigned other = req->should_be_same;
	for (int i = 0; i < 32; ++i) {
		if (other & (1U << i))
			return i;
	}
	panic("same position not found");
}