/**
 * Initialize the backend information attached to a node.
 *
 * Stores the scheduling/register flags and the input register
 * requirements, and allocates a zero-initialized array of output infos
 * on the graph's obstack (one entry per result).
 *
 * @param node    node whose backend info is set up
 * @param flags   architecture-specific node flags
 * @param in_reqs register requirements for the node's inputs
 * @param n_res   number of results, i.e. length of the out_infos array
 */
void be_info_init_irn(ir_node *const node, arch_irn_flags_t const flags, arch_register_req_t const **const in_reqs, unsigned const n_res)
{
	ir_graph       *const graph = get_irn_irg(node);
	struct obstack *const mem   = get_irg_obstack(graph);
	backend_info_t *const binfo = be_get_info(node);

	binfo->flags   = flags;
	binfo->in_reqs = in_reqs;
	/* Entries start out zeroed; output requirements are filled in later. */
	binfo->out_infos = NEW_ARR_DZ(reg_out_info_t, mem, n_res);
}
/**
 * Create a zero-input Phi node in the given block.
 *
 * The node gets a single output info (allocated on the backend obstack)
 * whose register requirement is @p req.
 *
 * @param block block the Phi belongs to
 * @param mode  mode of the Phi's value
 * @param req   register requirement for the single result
 * @return the newly constructed Phi node
 */
ir_node *be_new_Phi0(ir_node *const block, ir_mode *const mode, arch_register_req_t const *const req)
{
	ir_graph *const graph = get_irn_irg(block);
	ir_node  *const phi   = new_ir_node(NULL, graph, block, op_Phi, mode, 0, NULL);

	struct obstack *const be_mem = be_get_be_obst(graph);
	backend_info_t *const binfo  = be_get_info(phi);
	binfo->out_infos        = NEW_ARR_DZ(reg_out_info_t, be_mem, 1);
	binfo->out_infos[0].req = req;
	return phi;
}
/**
 * Initialize the generic attributes of a SPARC backend node.
 *
 * Sets the node's flags and input register requirements, then allocates
 * a zero-initialized output-info array on the graph's obstack.
 *
 * @param node    the node to initialize
 * @param flags   architecture-specific node flags
 * @param in_reqs register requirements for the node's inputs
 * @param n_res   number of results of the node
 */
static void init_sparc_attributes(ir_node *node, arch_irn_flags_t flags, const arch_register_req_t **in_reqs, int n_res)
{
	arch_set_irn_flags(node, flags);
	arch_set_irn_register_reqs_in(node, in_reqs);

	ir_graph       *graph = get_irn_irg(node);
	struct obstack *mem   = get_irg_obstack(graph);
	backend_info_t *binfo = be_get_info(node);
	binfo->out_infos = NEW_ARR_DZ(reg_out_info_t, mem, n_res);
}
/**
 * Create a Phi node with backend info in the given block.
 *
 * The Phi's mode is taken from the register class of @p req; every input
 * and the single output share that same requirement.
 *
 * @param block block the Phi belongs to
 * @param n_ins number of inputs
 * @param ins   array of input nodes
 * @param req   register requirement for all inputs and the result
 * @return the (possibly optimized) Phi node
 */
ir_node *be_new_Phi(ir_node *block, int n_ins, ir_node **ins, arch_register_req_t const *req)
{
	ir_graph *graph = get_irn_irg(block);
	ir_node  *phi   = new_ir_node(NULL, graph, block, op_Phi, req->cls->mode, n_ins, ins);
	phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(graph), n_ins);

	struct obstack *be_mem = be_get_be_obst(graph);
	backend_info_t *binfo  = be_get_info(phi);
	binfo->out_infos        = NEW_ARR_DZ(reg_out_info_t, be_mem, 1);
	binfo->out_infos[0].req = req;

	/* All inputs must live in the same register class as the result. */
	binfo->in_reqs = be_allocate_in_reqs(graph, n_ins);
	for (int k = 0; k < n_ins; ++k)
		binfo->in_reqs[k] = req;

	verify_new_node(phi);
	return optimize_node(phi);
}
/*
 * Match a middleend ASM node for the x86 backend: builds the operand
 * array, parses the output constraints into register requirements, and
 * collects clobbered registers into per-class bitmasks.
 *
 * NOTE(review): this definition appears truncated in this chunk — the
 * handling of input constraints and the function's return statement are
 * not visible here. Code left byte-identical; confirm the remainder in
 * the full file.
 */
ir_node *x86_match_ASM(ir_node const *const node, x86_clobber_name_t const *const additional_clobber_names, x86_asm_constraint_list_t const *const constraints) { unsigned const n_operands = be_count_asm_operands(node); ir_graph *const irg = get_irn_irg(node); struct obstack *const obst = get_irg_obstack(irg); x86_asm_operand_t *const operands = NEW_ARR_DZ(x86_asm_operand_t, obst, n_operands); int const n_inputs = get_ASM_n_inputs(node); size_t const n_out_constraints = get_ASM_n_output_constraints(node); ir_asm_constraint const *const in_constraints = get_ASM_input_constraints(node); ir_asm_constraint const *const out_constraints = get_ASM_output_constraints(node); /* construct output constraints */ size_t const n_clobbers = get_ASM_n_clobbers(node); arch_register_req_t const **out_reqs = NEW_ARR_F(arch_register_req_t const*, 0); for (unsigned o = 0; o < n_out_constraints; ++o) { ir_asm_constraint const *const constraint = &out_constraints[o]; be_asm_constraint_t parsed_constraint; parse_asm_constraints(&parsed_constraint, constraints, constraint->constraint, true); arch_register_req_t const *const req = be_make_register_req(obst, &parsed_constraint, n_out_constraints, out_reqs, o); ARR_APP1(arch_register_req_t const*, out_reqs, req); x86_asm_operand_t *const op = &operands[constraint->pos]; set_operand_if_invalid(op, ASM_OP_OUT_REG, o, constraint); } /* parse clobbers */ unsigned clobber_bits[isa_if->n_register_classes]; memset(&clobber_bits, 0, sizeof(clobber_bits)); ident **const clobbers = get_ASM_clobbers(node); for (size_t c = 0; c < n_clobbers; ++c) { char const *const clobber = get_id_str(clobbers[c]); arch_register_t const *const reg = x86_parse_clobber(additional_clobber_names, clobber); if (reg != NULL) { assert(reg->cls->n_regs <= sizeof(unsigned) * 8); /* x87 registers may still be used as input, even if clobbered. 
*/ if (reg->cls != &ia32_reg_classes[CLASS_ia32_fp]) clobber_bits[reg->cls->index] |= 1U << reg->index; ARR_APP1(arch_register_req_t const*, out_reqs, reg->single_req); } }
/**
 * Allocate and attach fresh backend info to a newly created node.
 *
 * Proj nodes are skipped entirely: their info is always fetched from
 * their predecessor. For the middleend opcodes that still occur in
 * backend graphs, sensible default flags and output requirements are
 * installed; every other opcode gets only the empty info record.
 *
 * @param irg  the graph the node belongs to
 * @param node the freshly created node
 */
void be_info_new_node(ir_graph *irg, ir_node *node)
{
	/* Projs carry no backend info of their own. */
	if (is_Proj(node))
		return;

	struct obstack *obst  = be_get_be_obst(irg);
	backend_info_t *binfo = OALLOCZ(obst, backend_info_t);
	assert(node->backend_info == NULL);
	node->backend_info = binfo;

	/* Defaults for the middleend opcodes handled below. */
	arch_irn_flags_t           flags = arch_irn_flag_not_scheduled;
	arch_register_req_t const *req   = arch_no_register_req;
	switch (get_irn_opcode(node)) {
	case iro_Anchor:
	case iro_Bad:
	case iro_Block:
	case iro_Dummy:
	case iro_End:
	case iro_Unknown:
		break;

	case iro_NoMem:
	case iro_Pin:
	case iro_Sync:
		/* These produce a memory value. */
		req = arch_memory_req;
		break;

	case iro_Phi:
		flags = arch_irn_flag_schedule_first;
		break;

	default:
		/* Not a middleend node needing defaults — leave info empty. */
		return;
	}

	binfo->flags            = flags;
	binfo->out_infos        = NEW_ARR_DZ(reg_out_info_t, obst, 1);
	binfo->out_infos[0].req = req;
}
/**
 * Initialize the generic attributes of a backend node.
 *
 * Allocates the input-requirement array (resizable for dynamic-arity
 * nodes, obstack-allocated otherwise, NULL when there are no inputs),
 * fills inputs and outputs with arch_no_register_req, and stores the
 * given flags.
 *
 * @param node      the node to initialize
 * @param n_outputs number of results of the node
 * @param flags     architecture-specific node flags
 */
static void init_node_attr(ir_node *const node, unsigned const n_outputs, arch_irn_flags_t const flags)
{
	ir_graph       *const graph = get_irn_irg(node);
	backend_info_t *const binfo = be_get_info(node);

	unsigned const n_ins = get_irn_arity(node);
	arch_register_req_t const **in_reqs;
	if (is_irn_dynamic(node)) {
		/* Dynamic-arity nodes need a resizable array. */
		in_reqs = NEW_ARR_F(arch_register_req_t const*, n_ins);
	} else if (n_ins != 0) {
		in_reqs = be_allocate_in_reqs(graph, n_ins);
	} else {
		in_reqs = NULL; /* never dereferenced: the fill loop is empty */
	}
	for (unsigned i = 0; i < n_ins; ++i)
		in_reqs[i] = arch_no_register_req;
	binfo->in_reqs = in_reqs;

	struct obstack *const be_mem = be_get_be_obst(graph);
	binfo->out_infos = NEW_ARR_DZ(reg_out_info_t, be_mem, n_outputs);
	for (unsigned o = 0; o < n_outputs; ++o)
		binfo->out_infos[o].req = arch_no_register_req;

	binfo->flags = flags;
}