/**
 * Pre-walker invoked by compute_callgraph(): records the caller/callee
 * relation for every Call node encountered.
 */
static void ana_Call(ir_node *n, void *env)
{
	(void)env;
	if (!is_Call(n))
		return;

	ir_graph *irg = get_irn_irg(n);
	size_t const n_callees = cg_get_call_n_callees(n);
	for (size_t i = 0; i < n_callees; ++i) {
		ir_entity *callee_ent = cg_get_call_callee(n, i);
		ir_graph  *callee_irg = get_entity_linktime_irg(callee_ent);
		if (callee_irg == NULL)
			continue;

		/* Register irg as a caller of the callee. */
		pset_insert((pset *)callee_irg->callers, irg, hash_ptr(irg));

		cg_callee_entry key;
		key.irg = callee_irg;
		cg_callee_entry *entry = (cg_callee_entry*)
			pset_find((pset *)irg->callees, &key, hash_ptr(callee_irg));
		if (entry != NULL) {
			/* Known callee: just remember this additional Call node. */
			ir_node **list = entry->call_list;
			ARR_APP1(ir_node *, list, n);
			entry->call_list = list;
		} else {
			/* First call from irg to this callee: create a fresh entry. */
			entry = OALLOC(get_irg_obstack(irg), cg_callee_entry);
			entry->irg          = callee_irg;
			entry->call_list    = NEW_ARR_F(ir_node *, 1);
			entry->call_list[0] = n;
			entry->max_depth    = 0;
			pset_insert((pset *)irg->callees, entry, hash_ptr(callee_irg));
		}

		/* Remember the deepest loop nesting of any call site for this pair. */
		unsigned const depth = get_loop_depth(get_irn_loop(get_nodes_block(n)));
		entry->max_depth = MAX(entry->max_depth, depth);
	}
}
ir_node *new_rd_ASM(dbg_info *db, ir_node *block, ir_node *mem, int arity, ir_node *in[], ir_asm_constraint *inputs, size_t n_outs, ir_asm_constraint *outputs, size_t n_clobber, ident *clobber[], ident *text) { ir_graph *const irg = get_irn_irg(block); int const r_arity = arity + 1; ir_node **const r_in = ALLOCAN(ir_node*, r_arity); r_in[0] = mem; MEMCPY(&r_in[1], in, arity); ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, r_arity, r_in); struct obstack *const obst = get_irg_obstack(irg); asm_attr *const a = &res->attr.assem; a->exc.pinned = true; a->input_constraints = NEW_ARR_D(ir_asm_constraint, obst, arity); a->output_constraints = NEW_ARR_D(ir_asm_constraint, obst, n_outs); a->clobbers = NEW_ARR_D(ident*, obst, n_clobber); a->text = text; MEMCPY(a->input_constraints, inputs, arity); MEMCPY(a->output_constraints, outputs, n_outs); MEMCPY(a->clobbers, clobber, n_clobber); verify_new_node(res); res = optimize_node(res); return res; }
/**
 * Initializes the backend info of a freshly created node: its flags, the
 * input register requirements, and a zero-initialized out-info array with
 * one slot per result.
 */
void be_info_init_irn(ir_node *const node, arch_irn_flags_t const flags, arch_register_req_t const **const in_reqs, unsigned const n_res)
{
	ir_graph       *const irg  = get_irn_irg(node);
	struct obstack *const obst = get_irg_obstack(irg);

	backend_info_t *const info = be_get_info(node);
	info->flags   = flags;
	info->in_reqs = in_reqs;
	/* Zeroed so every out info starts in a well-defined state. */
	info->out_infos = NEW_ARR_DZ(reg_out_info_t, obst, n_res);
}
/**
 * Creates an initializer holding the tarval @p tv.
 * Allocated on the const-code graph's obstack; ownership stays with the ir
 * library.
 */
ir_initializer_t *create_initializer_tarval(ir_tarval *tv)
{
	struct obstack   *obst = get_irg_obstack(get_const_code_irg());
	ir_initializer_t *init = (ir_initializer_t*)OALLOC(obst, ir_initializer_tarval_t);

	init->kind         = IR_INITIALIZER_TARVAL;
	init->tarval.value = tv;
	return init;
}
/**
 * Creates an initializer referencing the (constant) node @p value.
 * Allocated on the const-code graph's obstack; ownership stays with the ir
 * library.
 */
ir_initializer_t *create_initializer_const(ir_node *value)
{
	struct obstack   *obst = get_irg_obstack(get_const_code_irg());
	ir_initializer_t *init = (ir_initializer_t*)OALLOC(obst, ir_initializer_const_t);

	init->kind         = IR_INITIALIZER_CONST;
	init->consti.value = value;
	return init;
}
/**
 * Initializes the attributes of a newly created SPARC node: architecture
 * flags, input register requirements and a zeroed out-info array with one
 * slot per result.
 */
static void init_sparc_attributes(ir_node *node, arch_irn_flags_t flags, const arch_register_req_t **in_reqs, int n_res)
{
	arch_set_irn_flags(node, flags);
	arch_set_irn_register_reqs_in(node, in_reqs);

	ir_graph       *irg  = get_irn_irg(node);
	struct obstack *obst = get_irg_obstack(irg);
	backend_info_t *info = be_get_info(node);
	info->out_infos = NEW_ARR_DZ(reg_out_info_t, obst, n_res);
}
/**
 * Creates a backend Phi node in @p block whose result and every input
 * share the single register requirement @p req.
 */
ir_node *be_new_Phi(ir_node *block, int n_ins, ir_node **ins, arch_register_req_t const *req)
{
	ir_graph *irg = get_irn_irg(block);
	ir_node  *phi = new_ir_node(NULL, irg, block, op_Phi, req->cls->mode, n_ins, ins);
	phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), n_ins);

	/* Backend info is kept on the backend obstack, not the irg obstack. */
	struct obstack *obst = be_get_be_obst(irg);
	backend_info_t *info = be_get_info(phi);
	info->out_infos        = NEW_ARR_DZ(reg_out_info_t, obst, 1);
	info->in_reqs          = be_allocate_in_reqs(irg, n_ins);
	info->out_infos[0].req = req;
	for (int i = 0; i < n_ins; ++i)
		info->in_reqs[i] = req;

	verify_new_node(phi);
	return optimize_node(phi);
}
/**
 * Matches an ASM node for the x86 backend: translates its output
 * constraints into operand descriptions and register requirements, and
 * turns clobber identifiers into per-register-class clobber bitmasks.
 *
 * NOTE(review): this definition appears truncated in the visible chunk —
 * the function brace is never closed here (input-constraint handling and
 * node construction presumably follow); code left byte-identical.
 */
ir_node *x86_match_ASM(ir_node const *const node, x86_clobber_name_t const *const additional_clobber_names, x86_asm_constraint_list_t const *const constraints)
{
	unsigned const n_operands = be_count_asm_operands(node);
	ir_graph *const irg = get_irn_irg(node);
	struct obstack *const obst = get_irg_obstack(irg);
	/* One zero-initialized descriptor per ASM template operand. */
	x86_asm_operand_t *const operands = NEW_ARR_DZ(x86_asm_operand_t, obst, n_operands);
	int const n_inputs = get_ASM_n_inputs(node);
	size_t const n_out_constraints = get_ASM_n_output_constraints(node);
	ir_asm_constraint const *const in_constraints = get_ASM_input_constraints(node);
	ir_asm_constraint const *const out_constraints = get_ASM_output_constraints(node);

	/* construct output constraints */
	size_t const n_clobbers = get_ASM_n_clobbers(node);
	arch_register_req_t const **out_reqs = NEW_ARR_F(arch_register_req_t const*, 0);
	for (unsigned o = 0; o < n_out_constraints; ++o) {
		ir_asm_constraint const *const constraint = &out_constraints[o];
		be_asm_constraint_t parsed_constraint;
		parse_asm_constraints(&parsed_constraint, constraints, constraint->constraint, true);
		arch_register_req_t const *const req = be_make_register_req(obst, &parsed_constraint, n_out_constraints, out_reqs, o);
		ARR_APP1(arch_register_req_t const*, out_reqs, req);
		x86_asm_operand_t *const op = &operands[constraint->pos];
		set_operand_if_invalid(op, ASM_OP_OUT_REG, o, constraint);
	}

	/* parse clobbers */
	/* One clobber bitmask per register class (VLA sized by the ISA). */
	unsigned clobber_bits[isa_if->n_register_classes];
	memset(&clobber_bits, 0, sizeof(clobber_bits));
	ident **const clobbers = get_ASM_clobbers(node);
	for (size_t c = 0; c < n_clobbers; ++c) {
		char const *const clobber = get_id_str(clobbers[c]);
		arch_register_t const *const reg = x86_parse_clobber(additional_clobber_names, clobber);
		if (reg != NULL) {
			/* The bitmask below must be wide enough for every register. */
			assert(reg->cls->n_regs <= sizeof(unsigned) * 8);
			/* x87 registers may still be used as input, even if clobbered. */
			if (reg->cls != &ia32_reg_classes[CLASS_ia32_fp])
				clobber_bits[reg->cls->index] |= 1U << reg->index;
			ARR_APP1(arch_register_req_t const*, out_reqs, reg->single_req);
		}
	}
/**
 * Completes a backend Phi that was created without predecessors: sets the
 * inputs, the backedge bookkeeping and the per-input register requirements
 * (every input inherits the Phi's own requirement).
 */
ir_node *be_complete_Phi(ir_node *const phi, unsigned const n_ins, ir_node **const ins)
{
	assert(is_Phi(phi) && get_Phi_n_preds(phi) == 0);

	ir_graph *const irg = get_irn_irg(phi);
	phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), n_ins);
	set_irn_in(phi, n_ins, ins);

	arch_register_req_t const **const in_reqs = be_allocate_in_reqs(irg, n_ins);
	arch_register_req_t const  *const req     = arch_get_irn_register_req(phi);
	for (unsigned i = 0; i != n_ins; ++i)
		in_reqs[i] = req;
	be_get_info(phi)->in_reqs = in_reqs;

	verify_new_node(phi);
	return phi;
}
ir_initializer_t *create_initializer_compound(size_t n_entries) { struct obstack *obst = get_irg_obstack(get_const_code_irg()); size_t size = sizeof(ir_initializer_compound_t) + n_entries * sizeof(ir_initializer_t*) - sizeof(ir_initializer_t*); ir_initializer_t *initializer = (ir_initializer_t*)obstack_alloc(obst, size); initializer->kind = IR_INITIALIZER_COMPOUND; initializer->compound.n_initializers = n_entries; for (size_t i = 0; i < n_entries; ++i) { initializer->compound.initializers[i] = get_initializer_null(); } return initializer; }
/**
 * Finalizes an immature block by fixing its predecessor array.
 * A no-op if the block has already been matured.
 *
 * NOTE(review): this definition is truncated in the visible chunk — the
 * else branch and the rest of the function continue beyond it; code left
 * byte-identical.
 */
void mature_immBlock(ir_node *block)
{
	if (get_Block_matured(block))
		return;
	set_Block_matured(block, 1);

	/* Create final in-array for the block. */
	ir_graph *const irg = get_irn_irg(block);
	if (block->attr.block.dynamic_ins) {
		/* Attach a Bad predecessor if there is no other. This is necessary to
		 * fulfill the invariant that all nodes can be found through reverse
		 * edges from the start block. */
		ir_node **new_in;
		struct obstack *const obst = get_irg_obstack(irg);
		/* block->in[0] is the block's own entry, so preds = len - 1. */
		size_t n_preds = ARR_LEN(block->in) - 1;
		if (n_preds == 0) {
			n_preds = 1;
			new_in = NEW_ARR_D(ir_node*, obst, 2);
			new_in[0] = NULL;
			new_in[1] = new_r_Bad(irg, mode_X);
		} else {
/**
 * Computes the predecessors of a placeholder Phi node and fills them in.
 * The routine called to finish the node may optimize it away and return a
 * real value instead.  Must be called with an in-array of proper size.
 */
static ir_node *set_phi_arguments(ir_node *phi, int pos)
{
	ir_node  *block = get_nodes_block(phi);
	ir_graph *irg   = get_irn_irg(block);
	int       arity = get_irn_arity(block);
	ir_mode  *mode  = get_irn_mode(phi);

	/* Fetch the Phi operand from each control-flow predecessor block via
	 * get_r_value_internal; a missing predecessor yields Bad. */
	ir_node **in = ALLOCAN(ir_node*, arity);
	for (int i = 0; i < arity; ++i) {
		ir_node *pred_block = get_Block_cfgpred_block(block, i);
		in[i] = pred_block == NULL
		      ? new_r_Bad(irg, mode)
		      : get_r_value_internal(pred_block, pos, mode);
	}

	phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), arity);
	set_irn_in(phi, arity, in);

	verify_new_node(phi);
	try_remove_unnecessary_phi(phi);

	/* To keep (potentially) endless loops observable behaviour, add a
	 * keep-alive edge to all PhiM nodes. */
	if (mode == mode_M && !is_Id(phi)) {
		phi->attr.phi.loop = true;
		keep_alive(phi);
	}
	return phi;
}