/**
 * Allocates and initializes a new entity of the given kind.
 *
 * The entity is zero-allocated, filled with default attribute values
 * (external visibility, unknown usage, unset offset/alignment) and, when
 * the owner is a compound type, registered as a member of that owner.
 *
 * @param owner  owning type, must not be NULL
 * @param kind   the entity kind to create
 * @param name   the entity's name ident
 * @param type   the entity's type
 * @param dbgi   debug info attached via set_entity_dbg_info()
 * @return the freshly created entity
 */
static ir_entity *intern_new_entity(ir_type *owner, ir_entity_kind kind,
                                    ident *name, ir_type *type,
                                    dbg_info *dbgi)
{
    assert(owner != NULL);

    ir_entity *entity = XMALLOCZ(ir_entity);
    entity->kind                 = k_entity;
    entity->entity_kind          = kind;
    entity->name                 = name;
    entity->ld_name              = NULL;
    entity->type                 = type;
    entity->owner                = owner;
    /* conservative defaults; callers refine these afterwards */
    entity->volatility           = volatility_non_volatile;
    entity->aligned              = align_is_aligned;
    entity->usage                = ir_usage_unknown;
    entity->compiler_gen         = 0;
    entity->visibility           = ir_visibility_external;
    entity->offset               = -1; /* -1 == layout not yet computed */
    entity->offset_bit_remainder = 0;
    entity->alignment            = 0;
    entity->link                 = NULL;
#ifdef DEBUG_libfirm
    entity->nr = get_irp_new_node_nr();
#endif

    /* Remember entity in its owner. */
    if (is_compound_type(owner))
        add_compound_member(owner, entity);

    entity->visit = 0;
    set_entity_dbg_info(entity, dbgi);
    return entity;
}
/**
 * Creates a new ir_op with the given opcode, name and properties, installs
 * the default node operations, and grows the global opcodes array so that
 * index `code` is valid.
 *
 * NOTE(review): this definition appears truncated in this chunk — the code
 * that stores the new op into opcodes[code] and returns it is not visible
 * here; confirm against the full file.
 */
ir_op *new_ir_op(unsigned code, const char *name, op_pin_state p,
                 irop_flags flags, op_arity opar, int op_index,
                 size_t attr_size)
{
    ir_op *res = XMALLOCZ(ir_op);
    res->code      = code;
    res->name      = name;
    res->pin_state = p;
    res->attr_size = attr_size;
    res->flags     = flags;
    res->opar      = opar;
    res->op_index  = op_index;
    res->tag       = 0;

    /* start from all-NULL node ops, then install the generic defaults */
    memset(&res->ops, 0, sizeof(res->ops));
    res->ops.hash            = default_hash_node;
    res->ops.copy_attr       = default_copy_attr;
    res->ops.attrs_equal     = attrs_equal_true;
    res->ops.get_type_attr   = default_get_type_attr;
    res->ops.get_entity_attr = default_get_entity_attr;

    /* ensure opcodes[] is large enough; zero the newly exposed slots */
    size_t len = ARR_LEN(opcodes);
    if ((size_t)code >= len) {
        ARR_RESIZE(ir_op*, opcodes, (size_t)code+1);
        memset(&opcodes[len], 0, (code-len+1) * sizeof(opcodes[0]));
    }
/**
 * Creates an ir_graph pass wrapping lower_highlevel_graph().
 *
 * @param name  pass name, or NULL to use the default "lower_hl"
 * @return the constructed graph pass
 */
ir_graph_pass_t *lower_highlevel_graph_pass(const char *name)
{
    pass_t     *pass      = XMALLOCZ(pass_t);
    const char *pass_name = (name != NULL) ? name : "lower_hl";
    return def_graph_pass_constructor(&pass->pass, pass_name,
                                      lower_highlevel_graph_wrapper);
}
/**
 * Allocates a new sparse matrix with initial row/column capacities.
 * Negative initial sizes are clamped to zero; maxrow/maxcol start at -1
 * (i.e. no entries yet).
 */
sp_matrix_t *new_matrix(int row_init, int col_init)
{
    sp_matrix_t *matrix = XMALLOCZ(sp_matrix_t);
    matrix->maxrow = -1;
    matrix->maxcol = -1;
    m_alloc_row(matrix, 0, row_init > 0 ? row_init : 0);
    m_alloc_col(matrix, 0, col_init > 0 ? col_init : 0);
    return matrix;
}
/**
 * Adds a target specific preprocessor define.
 * The new define is pushed onto the front of the target.defines list.
 *
 * @param name   define name (not copied)
 * @param value  define value (not copied)
 * @return the newly created list node
 */
static target_define_t *ppdef(const char *name, const char *value)
{
    target_define_t *def = XMALLOCZ(target_define_t);
    def->name  = name;
    def->value = value;
    /* prepend to the global define list */
    def->next      = target.defines;
    target.defines = def;
    return def;
}
/**
 * Allocates a new Gauss-Seidel matrix.
 * Tiny row requests are grown to a minimum of 16 rows; the initial column
 * count is remembered as the per-row growth increment.
 */
gs_matrix_t *gs_new_matrix(int n_init_rows, int n_init_cols)
{
    gs_matrix_t *matrix = XMALLOCZ(gs_matrix_t);
    int          rows   = (n_init_rows < 16) ? 16 : n_init_rows;
    matrix->initial_col_increase = n_init_cols;
    alloc_rows(matrix, rows, n_init_cols, 0);
    return matrix;
}
/**
 * Registers a timer under a human-readable description.
 * The description is duplicated; the entry is appended to the global
 * singly-linked list of timer infos.
 */
void timer_register(ir_timer_t *timer, const char *description)
{
    timer_info_t *entry = XMALLOCZ(timer_info_t);
    entry->timer       = timer;
    entry->description = xstrdup(description);

    /* append at the tail, starting the list if it is empty */
    if (last_info == NULL) {
        infos = entry;
    } else {
        last_info->next = entry;
    }
    last_info = entry;
}
/** * Allocate a new IR graph. * This function respects the registered graph data. The only reason for * this function is, that there are two locations, where graphs are * allocated (new_r_ir_graph, new_const_code_irg). * @return Memory for a new graph. */ static ir_graph *alloc_graph(void) { ir_graph *const res = XMALLOCZ(ir_graph); res->kind = k_ir_graph; /* initialize the idx->node map. */ res->idx_irn_map = NEW_ARR_FZ(ir_node*, INITIAL_IDX_IRN_MAP_SIZE); obstack_init(&res->obst); /* value table for global value numbering for optimizing use in iropt.c */ new_identities(res); return res; }
/** * Create a new incomplete ir_prog. */ static ir_prog *new_incomplete_ir_prog(void) { ir_prog *res = XMALLOCZ(ir_prog); res->graphs = NEW_ARR_F(ir_graph *, 0); res->types = NEW_ARR_F(ir_type *, 0); res->global_asms = NEW_ARR_F(ident *, 0); res->last_label_nr = 1; /* 0 is reserved as non-label */ res->max_irg_idx = 0; res->max_node_nr = 0; #ifndef NDEBUG res->reserved_resources = IRP_RESOURCE_NONE; #endif res->globals = pmap_create(); return res; }
/**
 * Determines the SPARC calling convention for the given function type:
 * which parameters/results go into registers, which spill to the stack,
 * whether the frame pointer may be omitted, and the caller-save set.
 *
 * @param function_type  the method type to analyze
 * @param irg            graph for the function, or NULL for calls to
 *                       external functions
 * @return a freshly allocated calling convention description
 */
calling_convention_t *sparc_decide_calling_convention(ir_type *function_type,
                                                      ir_graph *irg)
{
    bool omit_fp = false;
    if (irg != NULL) {
        omit_fp = be_options.omit_fp;
        /* our current vaarg handling needs the standard space to store the
         * args 0-5 in it */
        if (is_method_variadic(function_type))
            omit_fp = false;
        /* The pointer to the aggregate return value belongs to the 92 magic
         * bytes. Thus, if the called functions increases the stack size,
         * it must copy the value to the appropriate location. This is not
         * implemented yet, so we forbid to omit the frame pointer. */
        if (get_method_calling_convention(function_type) & cc_compound_ret)
            omit_fp = false;
        if (omit_fp)
            irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
        sparc_get_irg_data(irg)->omit_fp = omit_fp;
    }

    mtp_additional_properties mtp
        = get_method_additional_properties(function_type);
    unsigned *caller_saves = rbitset_malloc(N_SPARC_REGISTERS);
    if (mtp & mtp_property_returns_twice) {
        rbitset_copy(caller_saves, default_returns_twice_saves,
                     N_SPARC_REGISTERS);
    } else {
        rbitset_copy(caller_saves, default_caller_saves, N_SPARC_REGISTERS);
    }

    /* determine how parameters are passed */
    int                 n_params = get_method_n_params(function_type);
    int                 regnum   = 0;
    reg_or_stackslot_t *params   = XMALLOCNZ(reg_or_stackslot_t, n_params);

    int      n_param_regs = ARRAY_SIZE(param_regs);
    unsigned stack_offset = !omit_fp ? SPARC_MIN_STACKSIZE : 0;
    for (int i = 0; i < n_params; ++i) {
        ir_type *param_type = get_method_param_type(function_type,i);
        if (is_compound_type(param_type))
            panic("compound arguments not supported yet");

        ir_mode *mode = get_type_mode(param_type);
        int      bits = get_mode_size_bits(mode);
        /* BUGFIX: was the HTML-entity-mangled "¶ms[i]" (&para;ms);
         * restored to the intended &params[i]. */
        reg_or_stackslot_t *param = &params[i];

        if (i == 0 &&
            (get_method_calling_convention(function_type) & cc_compound_ret)) {
            assert(mode_is_reference(mode) && bits == 32);
            /* special case, we have reserved space for this on the between
             * type */
            param->type           = param_type;
            param->offset         = SPARC_AGGREGATE_RETURN_OFFSET;
            param->already_stored = true;
            continue;
        }

        if (regnum < n_param_regs) {
            param->offset = SPARC_PARAMS_SPILL_OFFSET
                          + regnum * SPARC_REGISTER_SIZE;
            param->type   = param_type;
            arch_register_t const *reg = param_regs[regnum++];
            if (irg == NULL || omit_fp)
                reg = map_i_to_o_reg(reg);
            param->reg0 = reg;
            param->req0 = reg->single_req;
        } else {
            param->type           = param_type;
            param->offset         = stack_offset;
            param->already_stored = true;
            /* increase offset by at least SPARC_REGISTER_SIZE bytes so
             * everything is aligned */
            stack_offset += MAX(bits / 8, SPARC_REGISTER_SIZE);
            continue;
        }

        /* we might need a 2nd 32bit component (for 64bit or double values) */
        if (bits > 32) {
            if (bits > 64)
                panic("only 32 and 64bit modes supported");

            if (regnum < n_param_regs) {
                param->offset = SPARC_PARAMS_SPILL_OFFSET
                              + regnum * SPARC_REGISTER_SIZE;
                arch_register_t const *reg = param_regs[regnum++];
                if (irg == NULL || omit_fp)
                    reg = map_i_to_o_reg(reg);
                param->reg1 = reg;
                param->req1 = reg->single_req;
            } else {
                ir_mode *regmode = param_regs[0]->cls->mode;
                ir_type *type    = get_type_for_mode(regmode);
                param->type   = type;
                param->offset = stack_offset;
                assert(get_mode_size_bits(regmode) == 32);
                stack_offset += SPARC_REGISTER_SIZE;
            }
        }
    }
    unsigned n_param_regs_used = regnum;

    /* determine how results are passed */
    int                 n_results     = get_method_n_ress(function_type);
    unsigned            float_regnum  = 0;
    unsigned            n_reg_results = 0;
    unsigned n_float_result_regs = ARRAY_SIZE(float_result_regs);
    reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
    regnum = 0;
    for (int i = 0; i < n_results; ++i) {
        ir_type            *result_type = get_method_res_type(function_type, i);
        ir_mode            *result_mode = get_type_mode(result_type);
        reg_or_stackslot_t *result      = &results[i];
        if (mode_is_float(result_mode)) {
            /* float results must start at a register number aligned to the
             * number of registers they occupy */
            unsigned n_regs   = determine_n_float_regs(result_mode);
            unsigned next_reg = round_up2(float_regnum, n_regs);
            if (next_reg >= n_float_result_regs) {
                panic("too many float results");
            } else {
                const arch_register_t *reg = float_result_regs[next_reg];
                rbitset_clear(caller_saves, reg->global_index);
                if (n_regs == 1) {
                    result->req0 = reg->single_req;
                } else if (n_regs == 2) {
                    result->req0 = &float_result_reqs_double[next_reg];
                    rbitset_clear(caller_saves, reg->global_index+1);
                } else if (n_regs == 4) {
                    result->req0 = &float_result_reqs_quad[next_reg];
                    rbitset_clear(caller_saves, reg->global_index+1);
                    rbitset_clear(caller_saves, reg->global_index+2);
                    rbitset_clear(caller_saves, reg->global_index+3);
                } else {
                    panic("invalid number of registers in result");
                }
                float_regnum = next_reg + n_regs;
                ++n_reg_results;
            }
        } else {
            if (get_mode_size_bits(result_mode) > 32) {
                panic("results with more than 32bits not supported yet");
            }
            if (regnum >= n_param_regs) {
                panic("too many results");
            } else {
                const arch_register_t *reg = param_regs[regnum++];
                if (irg == NULL || omit_fp)
                    reg = map_i_to_o_reg(reg);
                result->req0 = reg->single_req;
                rbitset_clear(caller_saves, reg->global_index);
                ++n_reg_results;
            }
        }
    }

    calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
    cconv->n_parameters     = n_params;
    cconv->parameters       = params;
    /* NOTE(review): with omit_fp, stack_offset starts at 0, so this
     * subtraction may wrap — confirm param_stack_size is unused then. */
    cconv->param_stack_size = stack_offset - SPARC_MIN_STACKSIZE;
    cconv->n_param_regs     = n_param_regs_used;
    cconv->results          = results;
    cconv->omit_fp          = omit_fp;
    cconv->caller_saves     = caller_saves;
    cconv->n_reg_results    = n_reg_results;

    /* setup ignore register array */
    if (irg != NULL) {
        be_irg_t *birg = be_birg_from_irg(irg);
        birg->allocatable_regs
            = be_cconv_alloc_all_regs(&birg->obst, N_SPARC_REGISTERS);
        be_cconv_rem_regs(birg->allocatable_regs, ignore_regs,
                          ARRAY_SIZE(ignore_regs));
    }
    return cconv;
}
/**
 * Determines the ia32 calling convention for the given function type:
 * register/stack assignment of parameters and results, frame-pointer
 * omission, and the caller/callee save sets.
 *
 * @param function_type  the method type to analyze
 * @param irg            graph for the function, or NULL for external calls
 * @return a freshly allocated calling convention description
 */
x86_cconv_t *ia32_decide_calling_convention(ir_type *function_type,
                                            ir_graph *irg)
{
    bool omit_fp = false;
    if (irg != NULL) {
        omit_fp = be_options.omit_fp;
        if (omit_fp)
            irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
    }

    mtp_additional_properties mtp
        = get_method_additional_properties(function_type);
    (void)mtp;
    /* TODO: do something with cc_reg_param/cc_this_call */

    unsigned *caller_saves = rbitset_malloc(N_IA32_REGISTERS);
    unsigned *callee_saves = rbitset_malloc(N_IA32_REGISTERS);
    rbitset_copy(caller_saves, default_caller_saves, N_IA32_REGISTERS);
    rbitset_copy(callee_saves, default_callee_saves, N_IA32_REGISTERS);

    /* determine how parameters are passed */
    unsigned            n_params          = get_method_n_params(function_type);
    unsigned            param_regnum      = 0;
    unsigned            float_param_regnum = 0;
    reg_or_stackslot_t *params = XMALLOCNZ(reg_or_stackslot_t, n_params);
    unsigned            n_param_regs       = ARRAY_SIZE(default_param_regs);
    unsigned            n_float_param_regs = ARRAY_SIZE(float_param_regs);
    unsigned            stack_offset       = 0;
    for (unsigned i = 0; i < n_params; ++i) {
        ir_type *param_type = get_method_param_type(function_type, i);
        /* BUGFIX: was the HTML-entity-mangled "¶ms[i]" (&para;ms);
         * restored to the intended &params[i]. */
        reg_or_stackslot_t *param = &params[i];
        if (is_aggregate_type(param_type)) {
            param->type   = param_type;
            param->offset = stack_offset;
            stack_offset += get_type_size_bytes(param_type);
            goto align_stack;
        }

        ir_mode *mode = get_type_mode(param_type);
        if (mode_is_float(mode) && float_param_regnum < n_float_param_regs) {
            param->reg = float_param_regs[float_param_regnum++];
        } else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
            param->reg = default_param_regs[param_regnum++];
        } else {
            param->type   = param_type;
            param->offset = stack_offset;
            stack_offset += get_type_size_bytes(param_type);
align_stack:;
            /* increase offset by at least IA32_REGISTER_SIZE bytes so
             * everything is aligned */
            unsigned misalign = stack_offset % IA32_REGISTER_SIZE;
            if (misalign > 0)
                stack_offset += IA32_REGISTER_SIZE - misalign;
        }
    }
    unsigned n_param_regs_used = param_regnum + float_param_regnum;

    /* determine how results are passed */
    unsigned            n_results     = get_method_n_ress(function_type);
    unsigned            n_reg_results = 0;
    reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
    unsigned            res_regnum          = 0;
    unsigned            res_float_regnum    = 0;
    unsigned            n_result_regs       = ARRAY_SIZE(result_regs);
    unsigned            n_float_result_regs = ARRAY_SIZE(float_result_regs);
    for (size_t i = 0; i < n_results; ++i) {
        ir_type            *result_type = get_method_res_type(function_type, i);
        ir_mode            *result_mode = get_type_mode(result_type);
        reg_or_stackslot_t *result      = &results[i];
        const arch_register_t *reg;
        if (mode_is_float(result_mode)) {
            if (res_float_regnum >= n_float_result_regs) {
                panic("too many floating points results");
            }
            reg = float_result_regs[res_float_regnum++];
        } else {
            if (res_regnum >= n_result_regs) {
                panic("too many results");
            }
            reg = result_regs[res_regnum++];
        }
        result->reg = reg;
        rbitset_clear(caller_saves, reg->global_index);
        ++n_reg_results;
    }

    calling_convention cc = get_method_calling_convention(function_type);

    x86_cconv_t *cconv = XMALLOCZ(x86_cconv_t);
    cconv->sp_delta       = (cc & cc_compound_ret) && !(cc & cc_reg_param)
                          ? IA32_REGISTER_SIZE : 0;
    cconv->parameters     = params;
    cconv->n_parameters   = n_params;
    cconv->callframe_size = stack_offset;
    cconv->n_param_regs   = n_param_regs_used;
    cconv->n_xmm_regs     = float_param_regnum;
    cconv->results        = results;
    cconv->omit_fp        = omit_fp;
    cconv->caller_saves   = caller_saves;
    cconv->callee_saves   = callee_saves;
    cconv->n_reg_results  = n_reg_results;

    if (irg != NULL) {
        be_irg_t       *birg      = be_birg_from_irg(irg);
        size_t          n_ignores = ARRAY_SIZE(ignore_regs);
        struct obstack *obst      = &birg->obst;
        birg->allocatable_regs = rbitset_obstack_alloc(obst, N_IA32_REGISTERS);
        rbitset_set_all(birg->allocatable_regs, N_IA32_REGISTERS);
        for (size_t r = 0; r < n_ignores; ++r) {
            rbitset_clear(birg->allocatable_regs, ignore_regs[r]);
        }
        /* the frame pointer register is unavailable unless omitted */
        if (!omit_fp)
            rbitset_clear(birg->allocatable_regs, REG_EBP);
    }

    return cconv;
}
/**
 * Determines the ARM calling convention for the given function type:
 * register/stack assignment of parameters and results.
 *
 * @param irg            graph for the function, or NULL for external calls
 * @param function_type  the method type to analyze
 * @return a freshly allocated calling convention description
 */
calling_convention_t *arm_decide_calling_convention(const ir_graph *irg,
                                                    ir_type *function_type)
{
    /* determine how parameters are passed */
    unsigned            stack_offset = 0;
    size_t const        n_param_regs = ARRAY_SIZE(param_regs);
    size_t const        n_params     = get_method_n_params(function_type);
    size_t              regnum       = 0;
    reg_or_stackslot_t *params = XMALLOCNZ(reg_or_stackslot_t, n_params);
    for (size_t i = 0; i < n_params; ++i) {
        ir_type *param_type = get_method_param_type(function_type,i);
        ir_mode *mode       = get_type_mode(param_type);
        int      bits       = get_mode_size_bits(mode);
        /* BUGFIX: was the HTML-entity-mangled "¶ms[i]" (&para;ms);
         * restored to the intended &params[i]. */
        reg_or_stackslot_t *param = &params[i];
        param->type = param_type;

        /* doubleword modes need to be passed in even registers */
        if (param_type->flags & tf_lowered_dw) {
            if (regnum < n_param_regs) {
                if ((regnum & 1) != 0)
                    ++regnum;
            } else {
                unsigned misalign = stack_offset % 8;
                if (misalign > 0)
                    stack_offset += 8 - misalign;
            }
        }

        if (regnum < n_param_regs) {
            param->reg0 = param_regs[regnum++];
        } else {
            param->offset = stack_offset;
            /* increase offset 4 bytes so everything is aligned */
            stack_offset += MAX(bits / 8, 4);
            continue;
        }

        /* we might need a 2nd 32bit component (for 64bit or double values) */
        if (bits > 32) {
            if (bits > 64)
                panic("only 32 and 64bit modes supported");

            if (regnum < n_param_regs) {
                const arch_register_t *reg = param_regs[regnum++];
                param->reg1 = reg;
            } else {
                ir_mode *pmode = param_regs[0]->cls->mode;
                ir_type *type  = get_type_for_mode(pmode);
                param->type   = type;
                param->offset = stack_offset;
                assert(get_mode_size_bits(pmode) == 32);
                stack_offset += 4;
            }
        }
    }
    unsigned const n_param_regs_used = regnum;

    /* determine how results are passed */
    size_t const n_result_regs       = ARRAY_SIZE(result_regs);
    size_t const n_float_result_regs = ARRAY_SIZE(float_result_regs);
    size_t       n_results           = get_method_n_ress(function_type);
    size_t       float_regnum        = 0;
    reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
    regnum = 0;
    for (size_t i = 0; i < n_results; ++i) {
        ir_type            *result_type = get_method_res_type(function_type, i);
        ir_mode            *result_mode = get_type_mode(result_type);
        reg_or_stackslot_t *result      = &results[i];
        if (mode_is_float(result_mode)) {
            if (float_regnum >= n_float_result_regs) {
                panic("too many float results");
            } else {
                const arch_register_t *reg = float_result_regs[float_regnum++];
                result->reg0 = reg;
            }
        } else {
            if (get_mode_size_bits(result_mode) > 32) {
                panic("results with more than 32bits not supported yet");
            }
            if (regnum >= n_result_regs) {
                panic("too many results");
            } else {
                const arch_register_t *reg = result_regs[regnum++];
                result->reg0 = reg;
            }
        }
    }

    calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
    cconv->parameters       = params;
    cconv->n_parameters     = n_params;
    cconv->param_stack_size = stack_offset;
    cconv->n_param_regs     = n_param_regs_used;
    cconv->results          = results;

    /* setup allocatable registers */
    if (irg != NULL) {
        be_irg_t *birg = be_birg_from_irg(irg);
        assert(birg->allocatable_regs == NULL);
        birg->allocatable_regs
            = be_cconv_alloc_all_regs(&birg->obst, N_ARM_REGISTERS);
        be_cconv_rem_regs(birg->allocatable_regs, ignore_regs,
                          ARRAY_SIZE(ignore_regs));
        arm_get_irg_data(irg)->omit_fp = true;
    }

    return cconv;
}
/**
 * Creates a fresh, empty argument environment.
 * The argument handlers live in a hash set initially sized for 16 entries.
 */
lc_arg_env_t *lc_arg_new_env(void)
{
    lc_arg_env_t *result = XMALLOCZ(lc_arg_env_t);
    result->args = new_set(lc_arg_cmp, 16);
    return result;
}
/**
 * Determines the amd64 calling convention for the given function type:
 * register/stack assignment of parameters and results, handling of
 * variadic functions, frame-pointer omission, and the save sets.
 * Supports both the SysV and the x64 (Windows) ABI variants.
 *
 * @param function_type  the method type to analyze
 * @param irg            graph for the function, or NULL for external calls
 * @return a freshly allocated calling convention description
 */
x86_cconv_t *amd64_decide_calling_convention(ir_type *function_type,
                                             ir_graph *irg)
{
    bool omit_fp = false;
    if (irg != NULL) {
        omit_fp = be_options.omit_fp;
        if (omit_fp)
            irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
        amd64_get_irg_data(irg)->omit_fp = omit_fp;
    }

    unsigned *caller_saves = rbitset_malloc(N_AMD64_REGISTERS);
    unsigned *callee_saves = rbitset_malloc(N_AMD64_REGISTERS);
    rbitset_copy(caller_saves, default_caller_saves, N_AMD64_REGISTERS);
    rbitset_copy(callee_saves, default_callee_saves, N_AMD64_REGISTERS);

    /* determine how parameters are passed */
    size_t              n_params           = get_method_n_params(function_type);
    size_t              param_regnum       = 0;
    size_t              float_param_regnum = 0;
    reg_or_stackslot_t *params = XMALLOCNZ(reg_or_stackslot_t, n_params);
    /* x64 always reserves space to spill the first 4 arguments to have it
     * easy in case of variadic functions. */
    unsigned stack_offset = amd64_use_x64_abi ? 32 : 0;
    for (size_t i = 0; i < n_params; ++i) {
        ir_type *param_type = get_method_param_type(function_type,i);
        if (is_compound_type(param_type))
            panic("compound arguments NIY");

        ir_mode *mode = get_type_mode(param_type);
        int      bits = get_mode_size_bits(mode);
        /* BUGFIX: was the HTML-entity-mangled "¶ms[i]" (&para;ms);
         * restored to the intended &params[i]. */
        reg_or_stackslot_t *param = &params[i];

        if (mode_is_float(mode) && float_param_regnum < n_float_param_regs
            && mode != x86_mode_E) {
            param->reg = float_param_regs[float_param_regnum++];
            /* the x64 ABI counts GP and XMM slots together */
            if (amd64_use_x64_abi) {
                ++param_regnum;
            }
        } else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
            param->reg = param_regs[param_regnum++];
            if (amd64_use_x64_abi) {
                ++float_param_regnum;
            }
        } else {
            param->type   = param_type;
            param->offset = stack_offset;
            /* increase offset by at least AMD64_REGISTER_SIZE bytes so
             * everything is aligned */
            stack_offset += round_up2(bits / 8, AMD64_REGISTER_SIZE);
        }
    }

    /* If the function is variadic, we add all unused parameter
     * passing registers to the end of the params array, first GP,
     * then XMM. */
    if (irg && is_method_variadic(function_type)) {
        if (amd64_use_x64_abi) {
            panic("Variadic functions on Windows ABI not supported");
        }

        int params_remaining = (n_param_regs - param_regnum)
                             + (n_float_param_regs - float_param_regnum);
        params = XREALLOC(params, reg_or_stackslot_t,
                          n_params + params_remaining);
        size_t i = n_params;

        for (; param_regnum < n_param_regs; param_regnum++, i++) {
            params[i].reg = param_regs[param_regnum];
        }

        for (; float_param_regnum < n_float_param_regs;
             float_param_regnum++, i++) {
            params[i].reg = float_param_regs[float_param_regnum];
        }
    }

    unsigned n_param_regs_used = amd64_use_x64_abi
                               ? param_regnum
                               : param_regnum + float_param_regnum;

    /* determine how results are passed */
    size_t              n_results     = get_method_n_ress(function_type);
    unsigned            n_reg_results = 0;
    reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
    unsigned res_regnum          = 0;
    unsigned res_float_regnum    = 0;
    unsigned res_x87_regnum      = 0;
    size_t   n_result_regs       = ARRAY_SIZE(result_regs);
    size_t   n_float_result_regs = ARRAY_SIZE(float_result_regs);
    size_t   n_x87_result_regs   = ARRAY_SIZE(x87_result_regs);
    for (size_t i = 0; i < n_results; ++i) {
        ir_type            *result_type = get_method_res_type(function_type, i);
        ir_mode            *result_mode = get_type_mode(result_type);
        reg_or_stackslot_t *result      = &results[i];
        const arch_register_t *reg;
        if (result_mode == x86_mode_E) {
            if (res_x87_regnum >= n_x87_result_regs)
                /* BUGFIX: fixed typo "too manu" in the panic message */
                panic("too many x87 floating point results");
            reg = x87_result_regs[res_x87_regnum++];
        } else if (mode_is_float(result_mode)) {
            if (res_float_regnum >= n_float_result_regs) {
                panic("too many floating points results");
            }
            reg = float_result_regs[res_float_regnum++];
        } else {
            if (res_regnum >= n_result_regs) {
                panic("too many results");
            }
            reg = result_regs[res_regnum++];
        }
        result->reg = reg;
        rbitset_clear(caller_saves, reg->global_index);
        ++n_reg_results;
    }

    x86_cconv_t *cconv = XMALLOCZ(x86_cconv_t);
    cconv->parameters      = params;
    cconv->n_parameters    = n_params;
    cconv->param_stacksize = stack_offset;
    cconv->n_param_regs    = n_param_regs_used;
    cconv->n_xmm_regs      = float_param_regnum;
    cconv->results         = results;
    cconv->omit_fp         = omit_fp;
    cconv->caller_saves    = caller_saves;
    cconv->callee_saves    = callee_saves;
    cconv->n_reg_results   = n_reg_results;

    if (irg != NULL) {
        be_irg_t *birg = be_birg_from_irg(irg);
        birg->allocatable_regs
            = be_cconv_alloc_all_regs(&birg->obst, N_AMD64_REGISTERS);
        be_cconv_rem_regs(birg->allocatable_regs, ignore_regs,
                          ARRAY_SIZE(ignore_regs));
        /* the frame pointer register is unavailable unless omitted */
        if (!omit_fp)
            rbitset_clear(birg->allocatable_regs, REG_RBP);
    }

    return cconv;
}
/**
 * Allocates a new Gauss-Seidel matrix with the given initial row and
 * column capacities.
 */
gs_matrix_t *gs_new_matrix(unsigned n_init_rows, unsigned n_init_cols)
{
    gs_matrix_t *matrix = XMALLOCZ(gs_matrix_t);
    alloc_rows(matrix, n_init_rows, n_init_cols, 0);
    return matrix;
}
/**
 * Determines the amd64 calling convention for the given function type:
 * register/stack assignment of parameters and results, frame-pointer
 * omission, and the caller/callee save sets. Supports both the SysV and
 * the x64 (Windows) ABI variants.
 *
 * @param function_type  the method type to analyze
 * @param irg            graph for the function, or NULL for external calls
 * @return a freshly allocated calling convention description
 */
x86_cconv_t *amd64_decide_calling_convention(ir_type *function_type,
                                             ir_graph *irg)
{
    bool omit_fp = false;
    if (irg != NULL) {
        omit_fp = be_options.omit_fp;
        if (omit_fp)
            irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
    }

    mtp_additional_properties mtp
        = get_method_additional_properties(function_type);
    unsigned *caller_saves = rbitset_malloc(N_AMD64_REGISTERS);
    unsigned *callee_saves = rbitset_malloc(N_AMD64_REGISTERS);
    if (mtp & mtp_property_returns_twice)
        panic("amd64: returns_twice calling convention NIY");
    rbitset_copy(caller_saves, default_caller_saves, N_AMD64_REGISTERS);
    rbitset_copy(callee_saves, default_callee_saves, N_AMD64_REGISTERS);

    /* determine how parameters are passed */
    size_t              n_params           = get_method_n_params(function_type);
    size_t              param_regnum       = 0;
    size_t              float_param_regnum = 0;
    reg_or_stackslot_t *params = XMALLOCNZ(reg_or_stackslot_t, n_params);
    /* x64 always reserves space to spill the first 4 arguments to have it
     * easy in case of variadic functions. */
    unsigned stack_offset = amd64_use_x64_abi ? 32 : 0;
    for (size_t i = 0; i < n_params; ++i) {
        ir_type *param_type = get_method_param_type(function_type,i);
        if (is_compound_type(param_type))
            panic("amd64: compound arguments NIY");

        ir_mode *mode = get_type_mode(param_type);
        int      bits = get_mode_size_bits(mode);
        /* BUGFIX: was the HTML-entity-mangled "¶ms[i]" (&para;ms);
         * restored to the intended &params[i]. */
        reg_or_stackslot_t *param = &params[i];

        if (mode_is_float(mode) && float_param_regnum < n_float_param_regs) {
            param->reg = float_param_regs[float_param_regnum++];
            /* the x64 ABI counts GP and XMM slots together */
            if (amd64_use_x64_abi)
                ++param_regnum;
        } else if (!mode_is_float(mode) && param_regnum < n_param_regs) {
            param->reg = param_regs[param_regnum++];
            if (amd64_use_x64_abi)
                ++float_param_regnum;
        } else {
            param->type   = param_type;
            param->offset = stack_offset;
            /* increase offset by at least AMD64_REGISTER_SIZE bytes so
             * everything is aligned */
            stack_offset += MAX(bits / 8, AMD64_REGISTER_SIZE);
            continue;
        }
    }

    unsigned n_param_regs_used = amd64_use_x64_abi
                               ? param_regnum
                               : param_regnum + float_param_regnum;

    /* determine how results are passed */
    size_t              n_results     = get_method_n_ress(function_type);
    unsigned            n_reg_results = 0;
    reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
    unsigned res_regnum          = 0;
    unsigned res_float_regnum    = 0;
    size_t   n_result_regs       = ARRAY_SIZE(result_regs);
    size_t   n_float_result_regs = ARRAY_SIZE(float_result_regs);
    for (size_t i = 0; i < n_results; ++i) {
        ir_type            *result_type = get_method_res_type(function_type, i);
        ir_mode            *result_mode = get_type_mode(result_type);
        reg_or_stackslot_t *result      = &results[i];
        const arch_register_t *reg;
        if (mode_is_float(result_mode)) {
            if (res_float_regnum >= n_float_result_regs) {
                panic("too many floating points results");
            }
            reg = float_result_regs[res_float_regnum++];
        } else {
            if (res_regnum >= n_result_regs) {
                panic("too many results");
            }
            reg = result_regs[res_regnum++];
        }
        result->reg = reg;
        rbitset_clear(caller_saves, reg->global_index);
        ++n_reg_results;
    }

    x86_cconv_t *cconv = XMALLOCZ(x86_cconv_t);
    cconv->parameters     = params;
    cconv->callframe_size = stack_offset;
    cconv->n_param_regs   = n_param_regs_used;
    cconv->n_xmm_regs     = float_param_regnum;
    cconv->results        = results;
    cconv->omit_fp        = omit_fp;
    cconv->caller_saves   = caller_saves;
    cconv->callee_saves   = callee_saves;
    cconv->n_reg_results  = n_reg_results;

    if (irg != NULL) {
        be_irg_t       *birg      = be_birg_from_irg(irg);
        size_t          n_ignores = ARRAY_SIZE(ignore_regs);
        struct obstack *obst      = &birg->obst;
        birg->allocatable_regs = rbitset_obstack_alloc(obst, N_AMD64_REGISTERS);
        rbitset_set_all(birg->allocatable_regs, N_AMD64_REGISTERS);
        for (size_t r = 0; r < n_ignores; ++r) {
            rbitset_clear(birg->allocatable_regs, ignore_regs[r]);
        }
        /* the frame pointer register is unavailable unless omitted */
        if (!omit_fp)
            rbitset_clear(birg->allocatable_regs, REG_RBP);
    }

    return cconv;
}