ir_entity *new_d_entity(ir_type *owner, ident *name, ir_type *type, dbg_info *db)
{
	ir_entity *res;
	if (is_Method_type(type)) {
		ir_graph       *irg = get_const_code_irg();
		symconst_symbol sym;
		res = intern_new_entity(owner, IR_ENTITY_METHOD, name, type, db);
		sym.entity_p = res;
		set_atomic_ent_value(res,
			new_r_SymConst(irg, mode_P_code, sym, symconst_addr_ent));
		res->linkage                     = IR_LINKAGE_CONSTANT;
		res->attr.mtd_attr.properties    = get_method_additional_properties(type);
		res->attr.mtd_attr.vtable_number = IR_VTABLE_NUM_NOT_SET;
		res->attr.mtd_attr.param_access  = NULL;
		res->attr.mtd_attr.param_weight  = NULL;
		res->attr.mtd_attr.irg           = NULL;
	} else if (is_compound_type(owner) && !(owner->flags & tf_segment)) {
		res = intern_new_entity(owner, IR_ENTITY_COMPOUND_MEMBER, name, type, db);
	} else {
		res = intern_new_entity(owner, IR_ENTITY_NORMAL, name, type, db);
	}
	hook_new_entity(res);
	return res;
}
static ir_entity *intern_new_entity(ir_type *owner, ir_entity_kind kind,
                                    ident *name, ir_type *type, dbg_info *dbgi)
{
	assert(owner != NULL);

	ir_entity *res = XMALLOCZ(ir_entity);
	res->kind                 = k_entity;
	res->name                 = name;
	res->ld_name              = NULL;
	res->type                 = type;
	res->owner                = owner;
	res->entity_kind          = kind;
	res->volatility           = volatility_non_volatile;
	res->aligned              = align_is_aligned;
	res->usage                = ir_usage_unknown;
	res->compiler_gen         = 0;
	res->visibility           = ir_visibility_external;
	res->offset               = -1;
	res->offset_bit_remainder = 0;
	res->alignment            = 0;
	res->link                 = NULL;
#ifdef DEBUG_libfirm
	res->nr = get_irp_new_node_nr();
#endif

	/* Remember entity in its owner. */
	if (is_compound_type(owner))
		add_compound_member(owner, res);

	res->visit = 0;
	set_entity_dbg_info(res, dbgi);
	return res;
}
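/*
 * Illustrative only: a minimal sketch of how the two constructors above are
 * typically reached from client code, by creating a global entity for a
 * function "int answer(void)" via new_d_entity().  It assumes the libfirm API
 * level matching the SymConst-based code above; helper signatures such as
 * new_type_method() and new_type_primitive() differ between libfirm releases,
 * so treat this as a sketch rather than a reference.
 */
#include <libfirm/firm.h>

static ir_entity *make_answer_entity(void)
{
	/* "int" result type and a method type with 0 parameters, 1 result */
	ir_type *int_type    = new_type_primitive(mode_Is);
	ir_type *method_type = new_type_method(0, 1);
	set_method_res_type(method_type, 0, int_type);

	ident *name = new_id_from_str("answer");
	/* NULL debug info; new_entity() is the variant without a dbg_info* */
	return new_d_entity(get_glob_type(), name, method_type, NULL);
}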
std::string tv_to_string(caValue* value)
{
    ca_assert(is_list(value));
    if (is_compound_type(value->value_type))
        return compound_type_to_string(value);
    return list_to_string((ListData*) get_pointer(value));
}
void set_entity_owner(ir_entity *ent, ir_type *owner)
{
	assert(is_entity(ent));
	assert(is_compound_type(owner));
	remove_compound_member(ent->owner, ent);
	add_compound_member(owner, ent);
	ent->owner = owner;
}
void free_entity(ir_entity *ent)
{
	if (is_compound_type(ent->owner))
		remove_compound_member(ent->owner, ent);

	assert(ent->kind == k_entity);
	free_entity_attrs(ent);
#ifdef DEBUG_libfirm
	ent->kind = k_BAD;
#endif
	free(ent);
}
ir_entity *copy_entity_own(ir_entity *old, ir_type *new_owner)
{
	assert(is_entity(old));
	assert(is_compound_type(new_owner));
	assert(get_type_state(new_owner) != layout_fixed);
	if (old->owner == new_owner)
		return old;

	/* create a deep copy so we are safe from aliasing and double-freeing */
	ir_entity *newe = deep_entity_copy(old);
	newe->owner = new_owner;
	add_compound_member(new_owner, newe);
	return newe;
}
static void check_entity_initializer(ir_entity *entity)
{
#ifndef NDEBUG
	ir_initializer_t *initializer = entity->initializer;
	ir_type          *entity_tp   = get_entity_type(entity);
	switch (initializer->kind) {
	case IR_INITIALIZER_COMPOUND:
		assert(is_compound_type(entity_tp) || is_Array_type(entity_tp));
		break;
	case IR_INITIALIZER_CONST:
		/* methods are initialized by a SymConst */
		assert(is_atomic_type(entity_tp) || is_Method_type(entity_tp));
		break;
	case IR_INITIALIZER_TARVAL:
		assert(is_atomic_type(entity_tp));
		break;
	case IR_INITIALIZER_NULL:
		break;
	}
#else
	(void)entity;
#endif
}
calling_convention_t *sparc_decide_calling_convention(ir_type *function_type,
                                                      ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		/* our current vaarg handling needs the standard space to store the
		 * args 0-5 in it */
		if (is_method_variadic(function_type))
			omit_fp = false;
		/* The pointer to the aggregate return value belongs to the 92 magic
		 * bytes. Thus, if the called function increases the stack size, it
		 * must copy the value to the appropriate location. This is not
		 * implemented yet, so we forbid omitting the frame pointer. */
		if (get_method_calling_convention(function_type) & cc_compound_ret)
			omit_fp = false;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
		sparc_get_irg_data(irg)->omit_fp = omit_fp;
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	unsigned *caller_saves = rbitset_malloc(N_SPARC_REGISTERS);
	if (mtp & mtp_property_returns_twice) {
		rbitset_copy(caller_saves, default_returns_twice_saves,
		             N_SPARC_REGISTERS);
	} else {
		rbitset_copy(caller_saves, default_caller_saves, N_SPARC_REGISTERS);
	}

	/* determine how parameters are passed */
	int                 n_params = get_method_n_params(function_type);
	int                 regnum   = 0;
	reg_or_stackslot_t *params   = XMALLOCNZ(reg_or_stackslot_t, n_params);

	int      n_param_regs = ARRAY_SIZE(param_regs);
	unsigned stack_offset = !omit_fp ? SPARC_MIN_STACKSIZE : 0;
	for (int i = 0; i < n_params; ++i) {
		ir_type            *param_type = get_method_param_type(function_type, i);
		ir_mode            *mode;
		int                 bits;
		reg_or_stackslot_t *param;

		if (is_compound_type(param_type))
			panic("compound arguments not supported yet");

		mode  = get_type_mode(param_type);
		bits  = get_mode_size_bits(mode);
		param = &params[i];

		if (i == 0
		    && (get_method_calling_convention(function_type) & cc_compound_ret)) {
			assert(mode_is_reference(mode) && bits == 32);
			/* special case: we have reserved space for this on the between
			 * type */
			param->type           = param_type;
			param->offset         = SPARC_AGGREGATE_RETURN_OFFSET;
			param->already_stored = true;
			continue;
		}

		if (regnum < n_param_regs) {
			param->offset = SPARC_PARAMS_SPILL_OFFSET
			                + regnum * SPARC_REGISTER_SIZE;
			param->type   = param_type;
			arch_register_t const *reg = param_regs[regnum++];
			if (irg == NULL || omit_fp)
				reg = map_i_to_o_reg(reg);
			param->reg0 = reg;
			param->req0 = reg->single_req;
		} else {
			param->type           = param_type;
			param->offset         = stack_offset;
			param->already_stored = true;
			/* increase offset by at least SPARC_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += MAX(bits / 8, SPARC_REGISTER_SIZE);
			continue;
		}

		/* we might need a 2nd 32bit component (for 64bit or double values) */
		if (bits > 32) {
			if (bits > 64)
				panic("only 32 and 64bit modes supported");

			if (regnum < n_param_regs) {
				param->offset = SPARC_PARAMS_SPILL_OFFSET
				                + regnum * SPARC_REGISTER_SIZE;
				arch_register_t const *reg = param_regs[regnum++];
				if (irg == NULL || omit_fp)
					reg = map_i_to_o_reg(reg);
				param->reg1 = reg;
				param->req1 = reg->single_req;
			} else {
				ir_mode *regmode = param_regs[0]->cls->mode;
				ir_type *type    = get_type_for_mode(regmode);
				param->type   = type;
				param->offset = stack_offset;
				assert(get_mode_size_bits(regmode) == 32);
				stack_offset += SPARC_REGISTER_SIZE;
			}
		}
	}
	unsigned n_param_regs_used = regnum;

	/* determine how results are passed */
	int                 n_results           = get_method_n_ress(function_type);
	unsigned            float_regnum        = 0;
	unsigned            n_reg_results       = 0;
	unsigned            n_float_result_regs = ARRAY_SIZE(float_result_regs);
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	regnum = 0;
	for (int i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		if (mode_is_float(result_mode)) {
			unsigned n_regs   = determine_n_float_regs(result_mode);
			unsigned next_reg = round_up2(float_regnum, n_regs);

			if (next_reg >= n_float_result_regs) {
				panic("too many float results");
			} else {
				const arch_register_t *reg = float_result_regs[next_reg];
				rbitset_clear(caller_saves, reg->global_index);
				if (n_regs == 1) {
					result->req0 = reg->single_req;
				} else if (n_regs == 2) {
					result->req0 = &float_result_reqs_double[next_reg];
					rbitset_clear(caller_saves, reg->global_index + 1);
				} else if (n_regs == 4) {
					result->req0 = &float_result_reqs_quad[next_reg];
					rbitset_clear(caller_saves, reg->global_index + 1);
					rbitset_clear(caller_saves, reg->global_index + 2);
					rbitset_clear(caller_saves, reg->global_index + 3);
				} else {
					panic("invalid number of registers in result");
				}
				float_regnum = next_reg + n_regs;
				++n_reg_results;
			}
		} else {
			if (get_mode_size_bits(result_mode) > 32)
				panic("results with more than 32bits not supported yet");

			if (regnum >= n_param_regs) {
				panic("too many results");
			} else {
				const arch_register_t *reg = param_regs[regnum++];
				if (irg == NULL || omit_fp)
					reg = map_i_to_o_reg(reg);
				result->req0 = reg->single_req;
				rbitset_clear(caller_saves, reg->global_index);
				++n_reg_results;
			}
		}
	}

	calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
	cconv->n_parameters     = n_params;
	cconv->parameters       = params;
	cconv->param_stack_size = stack_offset - SPARC_MIN_STACKSIZE;
	cconv->n_param_regs     = n_param_regs_used;
	cconv->results          = results;
	cconv->omit_fp          = omit_fp;
	cconv->caller_saves     = caller_saves;
	cconv->n_reg_results    = n_reg_results;

	/* setup ignore register array */
	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);
		birg->allocatable_regs
			= be_cconv_alloc_all_regs(&birg->obst, N_SPARC_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs,
		                  ARRAY_SIZE(ignore_regs));
	}

	return cconv;
}
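/*
 * Worked example (illustration only): the float-result loop above rounds the
 * running register index up to the register count of each result (1 register
 * for a single, 2 for a double, 4 for a quad), so a double result that
 * follows a single skips one register.  round_up2() is re-implemented locally
 * here instead of using the libfirm helper, purely to keep the sketch
 * self-contained.
 */
#include <stdio.h>

static unsigned round_up2(unsigned x, unsigned align)
{
	/* round x up to a multiple of the power-of-two `align` */
	return (x + (align - 1)) & ~(align - 1);
}

int main(void)
{
	/* results: single (1 reg), double (2 regs), single (1 reg) */
	const unsigned n_regs_per_result[] = { 1, 2, 1 };
	unsigned float_regnum = 0;
	for (unsigned i = 0; i < 3; ++i) {
		unsigned n_regs   = n_regs_per_result[i];
		unsigned next_reg = round_up2(float_regnum, n_regs);
		printf("result %u -> %%f%u..%%f%u\n", i, next_reg,
		       next_reg + n_regs - 1);
		float_regnum = next_reg + n_regs;
	}
	/* prints: %f0..%f0, %f2..%f3, %f4..%f4 */
	return 0;
}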
x86_cconv_t *amd64_decide_calling_convention(ir_type *function_type,
                                             ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
		amd64_get_irg_data(irg)->omit_fp = omit_fp;
	}

	unsigned *caller_saves = rbitset_malloc(N_AMD64_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_AMD64_REGISTERS);
	rbitset_copy(caller_saves, default_caller_saves, N_AMD64_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_AMD64_REGISTERS);

	/* determine how parameters are passed */
	size_t              n_params           = get_method_n_params(function_type);
	size_t              param_regnum       = 0;
	size_t              float_param_regnum = 0;
	reg_or_stackslot_t *params = XMALLOCNZ(reg_or_stackslot_t, n_params);
	/* x64 always reserves space to spill the first 4 arguments, which makes
	 * variadic functions easy to handle. */
	unsigned stack_offset = amd64_use_x64_abi ? 32 : 0;
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *param_type = get_method_param_type(function_type, i);
		if (is_compound_type(param_type))
			panic("compound arguments NIY");

		ir_mode            *mode  = get_type_mode(param_type);
		int                 bits  = get_mode_size_bits(mode);
		reg_or_stackslot_t *param = &params[i];

		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs
		    && mode != x86_mode_E) {
			param->reg = float_param_regs[float_param_regnum++];
			if (amd64_use_x64_abi)
				++param_regnum;
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = param_regs[param_regnum++];
			if (amd64_use_x64_abi)
				++float_param_regnum;
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			/* increase offset by at least AMD64_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += round_up2(bits / 8, AMD64_REGISTER_SIZE);
		}
	}

	/* If the function is variadic, we add all unused parameter-passing
	 * registers to the end of the params array: first GP, then XMM. */
	if (irg && is_method_variadic(function_type)) {
		if (amd64_use_x64_abi)
			panic("Variadic functions on Windows ABI not supported");

		int params_remaining = (n_param_regs - param_regnum)
		                       + (n_float_param_regs - float_param_regnum);
		params = XREALLOC(params, reg_or_stackslot_t,
		                  n_params + params_remaining);

		size_t i = n_params;
		for (; param_regnum < n_param_regs; param_regnum++, i++) {
			params[i].reg = param_regs[param_regnum];
		}
		for (; float_param_regnum < n_float_param_regs;
		     float_param_regnum++, i++) {
			params[i].reg = float_param_regs[float_param_regnum];
		}
	}

	unsigned n_param_regs_used
		= amd64_use_x64_abi ? param_regnum : param_regnum + float_param_regnum;

	/* determine how results are passed */
	size_t              n_results     = get_method_n_ress(function_type);
	unsigned            n_reg_results = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned res_regnum       = 0;
	unsigned res_float_regnum = 0;
	unsigned res_x87_regnum   = 0;
	size_t   n_result_regs       = ARRAY_SIZE(result_regs);
	size_t   n_float_result_regs = ARRAY_SIZE(float_result_regs);
	size_t   n_x87_result_regs   = ARRAY_SIZE(x87_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (result_mode == x86_mode_E) {
			if (res_x87_regnum >= n_x87_result_regs)
				panic("too many x87 floating point results");
			reg = x87_result_regs[res_x87_regnum++];
		} else if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating point results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	x86_cconv_t *cconv     = XMALLOCZ(x86_cconv_t);
	cconv->parameters      = params;
	cconv->n_parameters    = n_params;
	cconv->param_stacksize = stack_offset;
	cconv->n_param_regs    = n_param_regs_used;
	cconv->n_xmm_regs      = float_param_regnum;
	cconv->results         = results;
	cconv->omit_fp         = omit_fp;
	cconv->caller_saves    = caller_saves;
	cconv->callee_saves    = callee_saves;
	cconv->n_reg_results   = n_reg_results;

	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);
		birg->allocatable_regs
			= be_cconv_alloc_all_regs(&birg->obst, N_AMD64_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs,
		                  ARRAY_SIZE(ignore_regs));
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_RBP);
	}

	return cconv;
}
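/*
 * Worked example (illustration only): under the Windows x64 ABI the GP and
 * XMM argument slots advance in lock step (the ++param_regnum /
 * ++float_param_regnum lines guarded by amd64_use_x64_abi above), while the
 * SysV ABI counts them independently.  This standalone toy models only that
 * counting rule, not the real register assignment.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	const bool is_float[] = { false, true, false };  /* (int, double, int) */
	for (int use_x64_abi = 0; use_x64_abi <= 1; ++use_x64_abi) {
		unsigned gp = 0, xmm = 0;
		for (unsigned i = 0; i < 3; ++i) {
			if (is_float[i]) {
				printf("%s: param %u -> xmm slot %u\n",
				       use_x64_abi ? "x64 " : "sysv", i, xmm++);
				if (use_x64_abi)
					++gp;
			} else {
				printf("%s: param %u -> gp slot %u\n",
				       use_x64_abi ? "x64 " : "sysv", i, gp++);
				if (use_x64_abi)
					++xmm;
			}
		}
	}
	/* sysv: gp0, xmm0, gp1   --   x64: gp0, xmm1, gp2 */
	return 0;
}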
int check_entity(const ir_entity *entity)
{
	bool       fine    = true;
	ir_linkage linkage = get_entity_linkage(entity);

	if (linkage & IR_LINKAGE_NO_CODEGEN) {
		if (!is_method_entity(entity)) {
			report_error("entity %+F has IR_LINKAGE_NO_CODEGEN but is not a function",
			             entity);
			fine = false;
		} else if (get_entity_irg(entity) == NULL) {
			report_error("entity %+F has IR_LINKAGE_NO_CODEGEN but has no ir-graph anyway",
			             entity);
			fine = false;
		}
		if (!is_externally_visible(entity)) {
			report_error("entity %+F has IR_LINKAGE_NO_CODEGEN but is not externally visible",
			             entity);
			fine = false;
		}
	}
	check_external_linkage(entity, IR_LINKAGE_WEAK, "WEAK");
	check_external_linkage(entity, IR_LINKAGE_GARBAGE_COLLECT, "GARBAGE_COLLECT");
	check_external_linkage(entity, IR_LINKAGE_MERGE, "MERGE");

	ir_type const *const type  = get_entity_type(entity);
	ir_type const *const owner = get_entity_owner(entity);
	switch (get_entity_kind(entity)) {
	case IR_ENTITY_ALIAS:
		if (!is_segment_type(owner)) {
			report_error("alias entity %+F has non-segment owner %+F", entity,
			             owner);
			fine = false;
		}
		break;

	case IR_ENTITY_NORMAL: {
		ir_initializer_t const *const init = get_entity_initializer(entity);
		if (init)
			fine &= check_initializer(init, type, entity);

		if (!is_data_type(type)) {
			report_error("normal entity %+F has non-data type %+F", entity,
			             type);
			fine = false;
		}
		break;
	}

	case IR_ENTITY_COMPOUND_MEMBER:
		if (!is_compound_type(owner)) {
			report_error("compound member entity %+F has non-compound owner %+F",
			             entity, owner);
			fine = false;
		}
		break;

	case IR_ENTITY_LABEL:
		if (type != get_code_type()) {
			report_error("label entity %+F has non-code type %+F", entity,
			             type);
			fine = false;
		}
		break;

	case IR_ENTITY_METHOD:
		if (!is_Method_type(type)) {
			report_error("method entity %+F has non-method type %+F", entity,
			             type);
			fine = false;
		}
		ir_graph *irg = get_entity_irg(entity);
		if (irg != NULL) {
			ir_entity *irg_entity = get_irg_entity(irg);
			if (irg_entity != entity) {
				report_error("entity(%+F)->irg->entity(%+F) relation invalid",
				             entity, irg_entity);
				fine = false;
			}
		}
		break;

	case IR_ENTITY_PARAMETER:
		if (!is_frame_type(owner)) {
			report_error("parameter entity %+F has non-frame owner %+F",
			             entity, owner);
			fine = false;
		}
		if (!is_data_type(type)) {
			report_error("parameter entity %+F has non-data type %+F", entity,
			             type);
			fine = false;
		}
		break;

	case IR_ENTITY_UNKNOWN:
		break;

	case IR_ENTITY_SPILLSLOT:
		if (!is_frame_type(owner)) {
			report_error("spillslot %+F must be on frame type", entity);
			fine = false;
		}
		break;
	}

	if (is_frame_type(owner) && entity_has_definition(entity)) {
		report_error("entity %+F on frame %+F has an initializer", entity,
		             owner);
		fine = false;
	}
	return fine;
}
static bool check_initializer(const ir_initializer_t *initializer,
                              const ir_type *type,
                              const ir_entity *context)
{
	bool fine = true;

	switch (get_initializer_kind(initializer)) {
	case IR_INITIALIZER_NULL:
		return fine;

	case IR_INITIALIZER_TARVAL: {
		ir_tarval *tv = get_initializer_tarval_value(initializer);
		if (get_type_mode(type) != get_tarval_mode(tv)) {
			report_error("tarval initializer for entity %+F has wrong mode: %+F vs %+F",
			             context, get_type_mode(type), get_tarval_mode(tv));
			fine = false;
		}
		return fine;
	}

	case IR_INITIALIZER_CONST: {
		ir_node *value = get_initializer_const_value(initializer);
		if (get_type_mode(type) != get_irn_mode(value)) {
			report_error("const initializer for entity %+F has wrong mode: %+F vs %+F",
			             context, get_type_mode(type), get_irn_mode(value));
			fine = false;
		}
		if (!constant_on_correct_irg(value)) {
			report_error("initializer const value %+F for entity %+F not on const-code irg",
			             value, context);
			fine = false;
		}
		return fine;
	}

	case IR_INITIALIZER_COMPOUND: {
		size_t n_entries = get_initializer_compound_n_entries(initializer);
		if (is_Array_type(type)) {
			ir_type *element_type = get_array_element_type(type);
			/* TODO: check array bounds? */
			for (size_t i = 0; i < n_entries; ++i) {
				const ir_initializer_t *sub_initializer
					= get_initializer_compound_value(initializer, i);
				check_initializer(sub_initializer, element_type, context);
			}
		} else if (is_compound_type(type)) {
			size_t n_members = get_compound_n_members(type);
			if (n_entries > n_members) {
				report_error("too many values in compound initializer of %+F",
				             context);
				fine = false;
			}
			for (size_t i = 0; i < n_entries; ++i) {
				if (i >= n_members)
					break;
				ir_entity *member      = get_compound_member(type, i);
				ir_type   *member_type = get_entity_type(member);
				const ir_initializer_t *sub_initializer
					= get_initializer_compound_value(initializer, i);
				check_initializer(sub_initializer, member_type, context);
			}
		} else {
			report_error("compound initializer for non-array/compound type in entity %+F",
			             context);
			fine = false;
		}
		return fine;
	}
	}

	report_error("invalid initializer for entity %+F", context);
	return false;
}
size_t bt_btr_start(struct bt_btr *btr,
		struct bt_field_type *type, const uint8_t *buf,
		size_t offset, size_t packet_offset, size_t sz,
		enum bt_btr_status *status)
{
	assert(btr);
	assert(BYTES_TO_BITS(sz) >= offset);
	reset(btr);
	btr->buf.addr = buf;
	btr->buf.offset = offset;
	btr->buf.at = 0;
	btr->buf.packet_offset = packet_offset;
	btr->buf.buf_sz = sz;
	btr->buf.sz = BYTES_TO_BITS(sz) - offset;
	*status = BT_BTR_STATUS_OK;

	BT_LOGV("Starting decoding: btr-addr=%p, ft-addr=%p, "
		"buf-addr=%p, buf-size=%zu, offset=%zu, "
		"packet-offset=%zu",
		btr, type, buf, sz, offset, packet_offset);

	/* Set root type */
	if (is_compound_type(type)) {
		/* Compound type: push on visit stack */
		int stack_ret;

		if (btr->user.cbs.types.compound_begin) {
			BT_LOGV("Calling user function (compound, begin).");
			*status = btr->user.cbs.types.compound_begin(
				type, btr->user.data);
			BT_LOGV("User function returned: status=%s",
				bt_btr_status_string(*status));
			if (*status != BT_BTR_STATUS_OK) {
				BT_LOGW("User function failed: btr-addr=%p, status=%s",
					btr, bt_btr_status_string(*status));
				goto end;
			}
		}

		stack_ret = stack_push_with_len(btr, type);
		if (stack_ret) {
			/* stack_push_with_len() logs errors */
			*status = BT_BTR_STATUS_ERROR;
			goto end;
		}

		btr->state = BTR_STATE_ALIGN_COMPOUND;
	} else {
		/* Basic type: set as current basic type */
		btr->cur_basic_field_type = type;
		bt_get(btr->cur_basic_field_type);
		btr->state = BTR_STATE_ALIGN_BASIC;
	}

	/* Run the machine! */
	BT_LOGV_STR("Running the state machine.");

	while (true) {
		*status = handle_state(btr);
		if (*status != BT_BTR_STATUS_OK ||
				btr->state == BTR_STATE_DONE) {
			break;
		}
	}

	/* Update packet offset for next time */
	update_packet_offset(btr);

end:
	return btr->buf.at;
}
static inline
enum bt_btr_status next_field_state(struct bt_btr *btr)
{
	int ret;
	struct stack_entry *top;
	struct bt_field_type *next_field_type = NULL;
	enum bt_btr_status status = BT_BTR_STATUS_OK;

	if (stack_empty(btr->stack)) {
		goto end;
	}

	top = stack_top(btr->stack);

	/* Are we done with this base type? */
	while (top->index == top->base_len) {
		if (btr->user.cbs.types.compound_end) {
			BT_LOGV("Calling user function (compound, end).");
			status = btr->user.cbs.types.compound_end(
				top->base_type, btr->user.data);
			BT_LOGV("User function returned: status=%s",
				bt_btr_status_string(status));
			if (status != BT_BTR_STATUS_OK) {
				BT_LOGW("User function failed: btr-addr=%p, status=%s",
					btr, bt_btr_status_string(status));
				goto end;
			}
		}

		stack_pop(btr->stack);

		/* Are we done with the root type? */
		if (stack_empty(btr->stack)) {
			btr->state = BTR_STATE_DONE;
			goto end;
		}

		top = stack_top(btr->stack);
		top->index++;
	}

	/* Get next field's type */
	switch (bt_field_type_get_type_id(top->base_type)) {
	case BT_FIELD_TYPE_ID_STRUCT:
		ret = bt_field_type_structure_get_field_by_index(
			top->base_type, NULL, &next_field_type,
			top->index);
		if (ret) {
			next_field_type = NULL;
		}
		break;
	case BT_FIELD_TYPE_ID_ARRAY:
		next_field_type = bt_field_type_array_get_element_type(
			top->base_type);
		break;
	case BT_FIELD_TYPE_ID_SEQUENCE:
		next_field_type = bt_field_type_sequence_get_element_type(
			top->base_type);
		break;
	case BT_FIELD_TYPE_ID_VARIANT:
		/* Variant types are dynamic: query the user, who should know */
		next_field_type = btr->user.cbs.query.get_variant_type(
			top->base_type, btr->user.data);
		break;
	default:
		break;
	}

	if (!next_field_type) {
		BT_LOGW("Cannot get the field type of the next field: "
			"btr-addr=%p, base-ft-addr=%p, base-ft-id=%s, "
			"index=%" PRId64,
			btr, top->base_type,
			bt_field_type_id_string(
				bt_field_type_get_type_id(top->base_type)),
			top->index);
		status = BT_BTR_STATUS_ERROR;
		goto end;
	}

	if (is_compound_type(next_field_type)) {
		if (btr->user.cbs.types.compound_begin) {
			BT_LOGV("Calling user function (compound, begin).");
			status = btr->user.cbs.types.compound_begin(
				next_field_type, btr->user.data);
			BT_LOGV("User function returned: status=%s",
				bt_btr_status_string(status));
			if (status != BT_BTR_STATUS_OK) {
				BT_LOGW("User function failed: btr-addr=%p, status=%s",
					btr, bt_btr_status_string(status));
				goto end;
			}
		}

		ret = stack_push_with_len(btr, next_field_type);
		if (ret) {
			/* stack_push_with_len() logs errors */
			status = BT_BTR_STATUS_ERROR;
			goto end;
		}

		/* Next state: align a compound type */
		btr->state = BTR_STATE_ALIGN_COMPOUND;
	} else {
		/* Replace current basic field type */
		BT_LOGV("Replacing current basic field type: "
			"btr-addr=%p, cur-basic-ft-addr=%p, "
			"next-basic-ft-addr=%p",
			btr, btr->cur_basic_field_type, next_field_type);
		BT_MOVE(btr->cur_basic_field_type, next_field_type);

		/* Next state: align a basic type */
		btr->state = BTR_STATE_ALIGN_BASIC;
	}

end:
	BT_PUT(next_field_type);
	return status;
}
x86_cconv_t *amd64_decide_calling_convention(ir_type *function_type,
                                             ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	unsigned *caller_saves = rbitset_malloc(N_AMD64_REGISTERS);
	unsigned *callee_saves = rbitset_malloc(N_AMD64_REGISTERS);
	if (mtp & mtp_property_returns_twice)
		panic("amd64: returns_twice calling convention NIY");
	rbitset_copy(caller_saves, default_caller_saves, N_AMD64_REGISTERS);
	rbitset_copy(callee_saves, default_callee_saves, N_AMD64_REGISTERS);

	/* determine how parameters are passed */
	size_t              n_params           = get_method_n_params(function_type);
	size_t              param_regnum       = 0;
	size_t              float_param_regnum = 0;
	reg_or_stackslot_t *params = XMALLOCNZ(reg_or_stackslot_t, n_params);
	/* x64 always reserves space to spill the first 4 arguments, which makes
	 * variadic functions easy to handle. */
	unsigned stack_offset = amd64_use_x64_abi ? 32 : 0;
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *param_type = get_method_param_type(function_type, i);
		if (is_compound_type(param_type))
			panic("amd64: compound arguments NIY");

		ir_mode            *mode  = get_type_mode(param_type);
		int                 bits  = get_mode_size_bits(mode);
		reg_or_stackslot_t *param = &params[i];

		if (mode_is_float(mode) && float_param_regnum < n_float_param_regs) {
			param->reg = float_param_regs[float_param_regnum++];
			if (amd64_use_x64_abi)
				++param_regnum;
		} else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
			param->reg = param_regs[param_regnum++];
			if (amd64_use_x64_abi)
				++float_param_regnum;
		} else {
			param->type   = param_type;
			param->offset = stack_offset;
			/* increase offset by at least AMD64_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += MAX(bits / 8, AMD64_REGISTER_SIZE);
			continue;
		}
	}

	unsigned n_param_regs_used
		= amd64_use_x64_abi ? param_regnum : param_regnum + float_param_regnum;

	/* determine how results are passed */
	size_t              n_results     = get_method_n_ress(function_type);
	unsigned            n_reg_results = 0;
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	unsigned res_regnum       = 0;
	unsigned res_float_regnum = 0;
	size_t   n_result_regs       = ARRAY_SIZE(result_regs);
	size_t   n_float_result_regs = ARRAY_SIZE(float_result_regs);
	for (size_t i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		const arch_register_t *reg;
		if (mode_is_float(result_mode)) {
			if (res_float_regnum >= n_float_result_regs) {
				panic("too many floating point results");
			}
			reg = float_result_regs[res_float_regnum++];
		} else {
			if (res_regnum >= n_result_regs) {
				panic("too many results");
			}
			reg = result_regs[res_regnum++];
		}
		result->reg = reg;
		rbitset_clear(caller_saves, reg->global_index);
		++n_reg_results;
	}

	x86_cconv_t *cconv    = XMALLOCZ(x86_cconv_t);
	cconv->parameters     = params;
	cconv->callframe_size = stack_offset;
	cconv->n_param_regs   = n_param_regs_used;
	cconv->n_xmm_regs     = float_param_regnum;
	cconv->results        = results;
	cconv->omit_fp        = omit_fp;
	cconv->caller_saves   = caller_saves;
	cconv->callee_saves   = callee_saves;
	cconv->n_reg_results  = n_reg_results;

	if (irg != NULL) {
		be_irg_t       *birg      = be_birg_from_irg(irg);
		size_t          n_ignores = ARRAY_SIZE(ignore_regs);
		struct obstack *obst      = &birg->obst;

		birg->allocatable_regs = rbitset_obstack_alloc(obst, N_AMD64_REGISTERS);
		rbitset_set_all(birg->allocatable_regs, N_AMD64_REGISTERS);
		for (size_t r = 0; r < n_ignores; ++r) {
			rbitset_clear(birg->allocatable_regs, ignore_regs[r]);
		}
		if (!omit_fp)
			rbitset_clear(birg->allocatable_regs, REG_RBP);
	}

	return cconv;
}