/* Fold frame entity offsets into a node's immediate value once the final
 * stack layout (and thus sp_offset) is known. */
static void sparc_determine_frameoffset(ir_node *const node,
                                        int const sp_offset)
{
	if (is_sparc_FrameAddr(node)) {
		sparc_attr_t    *const attr   = get_sparc_attr(node);
		ir_entity const *const entity = attr->immediate_value_entity;
		if (entity != NULL) {
			attr->immediate_value += get_entity_offset(entity);
			if (node_has_sp_base(node))
				attr->immediate_value += sp_offset + SPARC_MIN_STACKSIZE;
		}
	} else if (sparc_has_load_store_attr(node)) {
		sparc_load_store_attr_t *const attr = get_sparc_load_store_attr(node);
		if (!attr->is_frame_entity)
			return;
		ir_entity const *const entity = attr->base.immediate_value_entity;
		if (entity != NULL) {
			attr->base.immediate_value += get_entity_offset(entity);
			if (node_has_sp_base(node))
				attr->base.immediate_value += sp_offset + SPARC_MIN_STACKSIZE;
		}
	} else if (be_is_MemPerm(node)) {
		ir_graph *irg = get_irn_irg(node);
		if (sparc_get_irg_data(irg)->omit_fp)
			be_set_MemPerm_offset(node, sp_offset + SPARC_MIN_STACKSIZE);
	}
}
calling_convention_t *sparc_decide_calling_convention(ir_type *function_type,
                                                      ir_graph *irg)
{
	bool omit_fp = false;
	if (irg != NULL) {
		omit_fp = be_options.omit_fp;
		/* our current vaarg handling needs the standard stack area so
		 * args 0-5 can be stored there */
		if (is_method_variadic(function_type))
			omit_fp = false;
		/* The pointer to the aggregate return value belongs to the 92 magic
		 * bytes. Thus, if the called function increases the stack size, it
		 * must copy the value to the appropriate location. This is not
		 * implemented yet, so we forbid omitting the frame pointer. */
		if (get_method_calling_convention(function_type) & cc_compound_ret)
			omit_fp = false;
		if (omit_fp)
			irg_walk_graph(irg, check_omit_fp, NULL, &omit_fp);
		sparc_get_irg_data(irg)->omit_fp = omit_fp;
	}

	mtp_additional_properties mtp
		= get_method_additional_properties(function_type);
	unsigned *caller_saves = rbitset_malloc(N_SPARC_REGISTERS);
	if (mtp & mtp_property_returns_twice) {
		rbitset_copy(caller_saves, default_returns_twice_saves,
		             N_SPARC_REGISTERS);
	} else {
		rbitset_copy(caller_saves, default_caller_saves, N_SPARC_REGISTERS);
	}

	/* determine how parameters are passed */
	int                 n_params = get_method_n_params(function_type);
	int                 regnum   = 0;
	reg_or_stackslot_t *params   = XMALLOCNZ(reg_or_stackslot_t, n_params);

	int      n_param_regs = ARRAY_SIZE(param_regs);
	unsigned stack_offset = !omit_fp ? SPARC_MIN_STACKSIZE : 0;
	for (int i = 0; i < n_params; ++i) {
		ir_type            *param_type = get_method_param_type(function_type, i);
		ir_mode            *mode;
		int                 bits;
		reg_or_stackslot_t *param;

		if (is_compound_type(param_type))
			panic("compound arguments not supported yet");

		mode  = get_type_mode(param_type);
		bits  = get_mode_size_bits(mode);
		param = &params[i];

		if (i == 0 &&
		    (get_method_calling_convention(function_type) & cc_compound_ret)) {
			assert(mode_is_reference(mode) && bits == 32);
			/* special case: we have reserved space for this in the between
			 * type */
			param->type           = param_type;
			param->offset         = SPARC_AGGREGATE_RETURN_OFFSET;
			param->already_stored = true;
			continue;
		}

		if (regnum < n_param_regs) {
			param->offset = SPARC_PARAMS_SPILL_OFFSET
			                + regnum * SPARC_REGISTER_SIZE;
			param->type   = param_type;
			arch_register_t const *reg = param_regs[regnum++];
			if (irg == NULL || omit_fp)
				reg = map_i_to_o_reg(reg);
			param->reg0 = reg;
			param->req0 = reg->single_req;
		} else {
			param->type           = param_type;
			param->offset         = stack_offset;
			param->already_stored = true;
			/* increase offset by at least SPARC_REGISTER_SIZE bytes so
			 * everything is aligned */
			stack_offset += MAX(bits / 8, SPARC_REGISTER_SIZE);
			continue;
		}

		/* we might need a 2nd 32bit component (for 64bit or double values) */
		if (bits > 32) {
			if (bits > 64)
				panic("only 32 and 64bit modes supported");

			if (regnum < n_param_regs) {
				param->offset = SPARC_PARAMS_SPILL_OFFSET
				                + regnum * SPARC_REGISTER_SIZE;
				arch_register_t const *reg = param_regs[regnum++];
				if (irg == NULL || omit_fp)
					reg = map_i_to_o_reg(reg);
				param->reg1 = reg;
				param->req1 = reg->single_req;
			} else {
				ir_mode *regmode = param_regs[0]->cls->mode;
				ir_type *type    = get_type_for_mode(regmode);
				param->type      = type;
				param->offset    = stack_offset;
				assert(get_mode_size_bits(regmode) == 32);
				stack_offset += SPARC_REGISTER_SIZE;
			}
		}
	}
	unsigned n_param_regs_used = regnum;

	/* determine how results are passed */
	int                 n_results           = get_method_n_ress(function_type);
	unsigned            float_regnum        = 0;
	unsigned            n_reg_results       = 0;
	unsigned            n_float_result_regs = ARRAY_SIZE(float_result_regs);
	reg_or_stackslot_t *results = XMALLOCNZ(reg_or_stackslot_t, n_results);
	regnum = 0;
	for (int i = 0; i < n_results; ++i) {
		ir_type            *result_type = get_method_res_type(function_type, i);
		ir_mode            *result_mode = get_type_mode(result_type);
		reg_or_stackslot_t *result      = &results[i];

		if (mode_is_float(result_mode)) {
			unsigned n_regs   = determine_n_float_regs(result_mode);
			unsigned next_reg = round_up2(float_regnum, n_regs);

			if (next_reg >= n_float_result_regs) {
				panic("too many float results");
			} else {
				const arch_register_t *reg = float_result_regs[next_reg];
				rbitset_clear(caller_saves, reg->global_index);
				if (n_regs == 1) {
					result->req0 = reg->single_req;
				} else if (n_regs == 2) {
					result->req0 = &float_result_reqs_double[next_reg];
					rbitset_clear(caller_saves, reg->global_index+1);
				} else if (n_regs == 4) {
					result->req0 = &float_result_reqs_quad[next_reg];
					rbitset_clear(caller_saves, reg->global_index+1);
					rbitset_clear(caller_saves, reg->global_index+2);
					rbitset_clear(caller_saves, reg->global_index+3);
				} else {
					panic("invalid number of registers in result");
				}
				float_regnum = next_reg + n_regs;

				++n_reg_results;
			}
		} else {
			if (get_mode_size_bits(result_mode) > 32) {
				panic("results with more than 32bits not supported yet");
			}

			if (regnum >= n_param_regs) {
				panic("too many results");
			} else {
				const arch_register_t *reg = param_regs[regnum++];
				if (irg == NULL || omit_fp)
					reg = map_i_to_o_reg(reg);
				result->req0 = reg->single_req;
				rbitset_clear(caller_saves, reg->global_index);
				++n_reg_results;
			}
		}
	}

	calling_convention_t *cconv = XMALLOCZ(calling_convention_t);
	cconv->n_parameters     = n_params;
	cconv->parameters       = params;
	cconv->param_stack_size = stack_offset - SPARC_MIN_STACKSIZE;
	cconv->n_param_regs     = n_param_regs_used;
	cconv->results          = results;
	cconv->omit_fp          = omit_fp;
	cconv->caller_saves     = caller_saves;
	cconv->n_reg_results    = n_reg_results;

	/* setup ignore register array */
	if (irg != NULL) {
		be_irg_t *birg = be_birg_from_irg(irg);

		birg->allocatable_regs = be_cconv_alloc_all_regs(&birg->obst,
		                                                 N_SPARC_REGISTERS);
		be_cconv_rem_regs(birg->allocatable_regs, ignore_regs,
		                  ARRAY_SIZE(ignore_regs));
	}

	return cconv;
}