/**
 * Calculate a weight for each argument of an entity.
 *
 * Allocates the param_weight array on the entity (this also serves as the
 * "already analysed" flag), initializes all weights to null_weight and, if
 * a graph is available, accumulates the weight computed from every use of
 * each argument Proj.
 *
 * @param ent The entity of the ir_graph.
 */
static void analyze_method_params_weight(ir_entity *ent)
{
	/* allocate a new array; currently used as 'analysed' flag */
	ir_type *mtp     = get_entity_type(ent);
	size_t   nparams = get_method_n_params(mtp);
	ent->attr.mtd_attr.param_weight = NEW_ARR_F(unsigned, nparams);

	/* If the method has no parameters we have nothing to do.
	 * (nparams is unsigned, so test == 0 rather than <= 0.) */
	if (nparams == 0)
		return;

	/* First we initialize the parameter weights with 0. */
	for (size_t i = nparams; i-- > 0; )
		ent->attr.mtd_attr.param_weight[i] = null_weight;

	ir_graph *irg = get_entity_irg(ent);
	if (irg == NULL) {
		/* no graph, no better info */
		return;
	}

	/* Ensure the out edges are computed before walking them. */
	assure_irg_outs(irg);

	ir_node *irg_args = get_irg_args(irg);
	for (int i = get_irn_n_outs(irg_args); i-- > 0; ) {
		ir_node *arg     = get_irn_out(irg_args, i);
		long     proj_nr = get_Proj_proj(arg);
		ent->attr.mtd_attr.param_weight[proj_nr] += calc_method_param_weight(arg);
	}
}
/**
 * Check whether each reference-mode argument of the entity's graph is
 * read, written, or both, and store the result in the entity's
 * param_access array.
 *
 * Without a graph, pointer arguments conservatively get ptr_access_all.
 *
 * @param ent The entity whose graph is analyzed.
 */
static void analyze_ent_args(ir_entity *ent)
{
	ir_type *mtp     = get_entity_type(ent);
	size_t   nparams = get_method_n_params(mtp);
	ent->attr.mtd_attr.param_access = NEW_ARR_F(ptr_access_kind, nparams);

	/* If the method has no parameters we have nothing to do.
	 * (nparams is unsigned, so test == 0 rather than <= 0.) */
	if (nparams == 0)
		return;

	/* we have not yet analyzed the graph, set ALL access for pointer args */
	for (size_t i = nparams; i-- > 0; ) {
		ir_type *type = get_method_param_type(mtp, i);
		ent->attr.mtd_attr.param_access[i] =
			is_Pointer_type(type) ? ptr_access_all : ptr_access_none;
	}

	ir_graph *irg = get_entity_irg(ent);
	if (irg == NULL) {
		/* no graph, no better info */
		return;
	}

	/* Ensure the out edges are computed before walking them. */
	assure_irg_outs(irg);

	ir_node *irg_args = get_irg_args(irg);

	/* An array to save the information for each argument with mode reference. */
	ptr_access_kind *rw_info;
	NEW_ARR_A(ptr_access_kind, rw_info, nparams);

	/* We initialize the elements with the none state. */
	for (size_t i = nparams; i-- > 0; )
		rw_info[i] = ptr_access_none;

	/* search for arguments with mode reference to analyze them */
	for (int i = get_irn_n_outs(irg_args); i-- > 0; ) {
		ir_node *arg      = get_irn_out(irg_args, i);
		ir_mode *arg_mode = get_irn_mode(arg);
		long     proj_nr  = get_Proj_proj(arg);

		if (mode_is_reference(arg_mode))
			rw_info[proj_nr] |= analyze_arg(arg, rw_info[proj_nr]);
	}

	/* copy the temporary info */
	memcpy(ent->attr.mtd_attr.param_access, rw_info,
	       nparams * sizeof(ent->attr.mtd_attr.param_access[0]));
}
/**
 * Rewrite a Proj(Alloc, pn_Alloc_res) so that users see the allocated
 * address shifted by the global addr_delta.
 *
 * The node is replaced by an Add(new Proj, addr_delta).  A Dummy is used
 * as a placeholder for the Add's left operand while exchange() runs, so
 * that the replacement Proj (created only afterwards) is not itself
 * redirected to the Add.  The new Proj is recorded in the global
 * 'transformed' set to prevent re-processing.
 */
static void transform_Proj_Alloc(ir_node *node)
{
	/* we might need a result adjustment */
	if (addr_delta == 0)
		return;
	if (get_Proj_proj(node) != pn_Alloc_res)
		return;
	/* already rewritten by a previous call — nothing to do */
	if (ir_nodeset_contains(&transformed, node))
		return;

	ir_node  *const alloc = get_Proj_pred(node);
	dbg_info *const dbgi  = get_irn_dbg_info(alloc);
	ir_graph *const irg   = get_irn_irg(node);
	ir_node  *const block = get_nodes_block(node);
	ir_node  *const delta = new_r_Const_long(irg, mode_P, addr_delta);
	/* placeholder operand: the real Proj must not exist yet when we call
	 * exchange(), otherwise it would be rewired to the Add as well */
	ir_node  *const dummy = new_r_Dummy(irg, mode_P);
	ir_node  *const add   = new_rd_Add(dbgi, block, dummy, delta, mode_P);

	exchange(node, add);
	/* now create the real result Proj and patch it in as the Add's left input */
	ir_node *const new_proj = new_r_Proj(alloc, mode_P, pn_Alloc_res);
	set_Add_left(add, new_proj);
	ir_nodeset_insert(&transformed, new_proj);
}
/**
 * Emit a Compare with conditional branch.
 *
 * NOTE(review): this function appears truncated in this chunk — the body
 * continues past the visible span (block, next_block and suffix are
 * declared but not yet used here).
 */
static void emit_amd64_Jcc(const ir_node *irn)
{
	const ir_node      *proj_true  = NULL;
	const ir_node      *proj_false = NULL;
	const ir_node      *block;
	const ir_node      *next_block;
	const char         *suffix;
	const amd64_attr_t *attr      = get_amd64_attr_const(irn);
	ir_relation         relation  = attr->ext.relation;
	ir_node            *op1       = get_irn_n(irn, 0);
	const amd64_attr_t *cmp_attr  = get_amd64_attr_const(op1);
	/* signedness of the comparison is taken from the Cmp node's attributes */
	bool                is_signed = !cmp_attr->data.cmp_unsigned;

	assert(is_amd64_Cmp(op1));

	/* partition the out edges into the true and false control-flow Projs */
	foreach_out_edge(irn, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		long     nr   = get_Proj_proj(proj);
		if (nr == pn_Cond_true) {
			proj_true = proj;
		} else {
			proj_false = proj;
		}
	}
/**
 * Graph-walker callback that infers the dynamic (class) type of
 * pointer-valued nodes and stores it as the node's typeinfo.
 *
 * Meant to be run to a fixed point: *env points to a 'changed' flag that
 * is set whenever a node's typeinfo is updated, so the driver can re-walk
 * until no more information propagates.
 *
 * Fix: the Phi branch previously updated the typeinfo WITHOUT setting
 * *changed, so types derived at Phis could fail to propagate to their
 * consumers before the driver loop terminated.
 */
static void infer_typeinfo_walker(ir_node *irn, void *env)
{
	bool *changed = (bool*) env;

	// A node's type needs only to be calculated once.
	if (get_irn_typeinfo_type(irn) != initial_type)
		return;

	if (is_Alloc(irn)) {
		// this one is easy, we know the exact dynamic type.
		ir_type *type = get_Alloc_type(irn);
		if (! is_Class_type(type))
			return;
		set_irn_typeinfo_type(irn, type);
		*changed = true;
	}
	else if (is_Sel(irn) || is_SymConst_addr_ent(irn)) {
		// the type we determine here is the one of the entity we select or reference.
		// the transform_Sel method below will use the type incoming on the Sel_ptr input.
		ir_type *type = get_Sel_or_SymConst_type(irn);
		if (! type)
			return;
		ir_type *one_alive = get_alive_subclass(type);
		if (! one_alive)
			return;
		set_irn_typeinfo_type(irn, one_alive);
		*changed = true;
	}
	else if (is_Call(irn)) {
		// the dynamic type of the call result is the return type of the called entity.
		ir_node *call_pred = get_Call_ptr(irn);
		ir_type *pred_type = get_irn_typeinfo_type(call_pred);
		if (pred_type == initial_type)
			return;
		set_irn_typeinfo_type(irn, pred_type);
		*changed = true;
	}
	else if (is_Load(irn)) {
		// the dynamic type of the Load result is the type of the loaded entity.
		ir_node *load_pred = get_Load_ptr(irn);
		if (! is_Sel(load_pred) && !is_SymConst_addr_ent(load_pred))
			return;
		ir_type *pred_type = get_irn_typeinfo_type(load_pred);
		if (pred_type == initial_type)
			return;
		set_irn_typeinfo_type(irn, pred_type);
		*changed = true;
	}
	else if (is_Proj(irn)) {
		// Types have to be propagated through Proj nodes
		// (XXX: and also through Cast and Confirm).
		ir_mode *pmode = get_irn_mode(irn);
		if (pmode != mode_P)
			return;
		ir_node *proj_pred = get_Proj_pred(irn);
		if (is_Proj(proj_pred) && get_irn_mode(proj_pred) == mode_T
		 && get_Proj_proj(proj_pred) == pn_Call_T_result
		 && is_Call(get_Proj_pred(proj_pred)))
			proj_pred = get_Proj_pred(proj_pred); // skip the result tuple
		ir_type *pred_type = get_irn_typeinfo_type(proj_pred);
		if (pred_type == initial_type)
			return;
		set_irn_typeinfo_type(irn, pred_type);
		*changed = true;
	}
	else if (is_Phi(irn)) {
		// Phi nodes are a special case because the incoming type information must be merged.
		// A Phi node's type is unknown until all inputs are known to be the same dynamic type.
		ir_mode *pmode = get_irn_mode(irn);
		if (pmode != mode_P)
			return;
		int      phi_preds = get_Phi_n_preds(irn);
		ir_type *last      = NULL;
		for (int p = 0; p < phi_preds; p++) {
			ir_node *pred      = get_Phi_pred(irn, p);
			ir_type *pred_type = get_irn_typeinfo_type(pred);
			if (pred_type == initial_type)
				return;
			if (p && last != pred_type)
				return;
			last = pred_type;
		}
		set_irn_typeinfo_type(irn, last);
		// bug fix: mark progress so the fixed-point driver re-walks and
		// propagates the Phi's type to its consumers (all other branches do this)
		*changed = true;
	}
}
/**
 * Compute the weight of a method parameter by inspecting all of its uses
 * (via the out edges).  The weight estimates how profitable it would be
 * to know the argument's value at compile time: uses as a call pointer,
 * in comparisons, Conds and constant binops score according to the
 * respective *_weight constants.
 *
 * Recurses through Id nodes, unoptimized Tuples (following the matching
 * Proj) and binops/unops with a constant other operand.
 *
 * @param arg The parameter whose weight must be computed.
 */
static unsigned calc_method_param_weight(ir_node *arg)
{
	/* We mark the nodes to avoid endless recursion */
	mark_irn_visited(arg);
	unsigned weight = null_weight;

	for (int i = get_irn_n_outs(arg); i-- > 0; ) {
		ir_node *succ = get_irn_out(arg, i);

		if (irn_visited(succ))
			continue;

		/* We should not walk over the memory edge.*/
		if (get_irn_mode(succ) == mode_M)
			continue;

		switch (get_irn_opcode(succ)) {
		case iro_Call:
			if (get_Call_ptr(succ) == arg) {
				/* the argument is used as a pointer input for a call,
				   we can probably change an indirect Call into a direct one. */
				weight += indirect_call_weight;
			}
			break;
		case iro_Cmp: {
			/* We have reached a cmp and we must increase the
			   weight with the cmp_weight. */
			ir_node *op;
			if (get_Cmp_left(succ) == arg)
				op = get_Cmp_right(succ);
			else
				op = get_Cmp_left(succ);

			if (is_irn_constlike(op)) {
				/* comparison against a constant scores higher */
				weight += const_cmp_weight;
			} else
				weight += cmp_weight;
			break;
		}
		case iro_Cond:
			/* the argument is used for a SwitchCond, a big win */
			weight += const_cmp_weight * get_irn_n_outs(succ);
			break;
		case iro_Id:
			/* when looking backward we might find Id nodes */
			weight += calc_method_param_weight(succ);
			break;
		case iro_Tuple:
			/* unoptimized tuple */
			for (int j = get_Tuple_n_preds(succ); j-- > 0; ) {
				ir_node *pred = get_Tuple_pred(succ, j);
				if (pred == arg) {
					/* the argument flows into position j; look for Proj(j)
					   among the Tuple's users and continue there */
					for (int k = get_irn_n_outs(succ); k-- > 0; ) {
						ir_node *succ_succ = get_irn_out(succ, k);
						if (is_Proj(succ_succ)) {
							if (get_Proj_proj(succ_succ) == j) {
								/* found */
								weight += calc_method_param_weight(succ_succ);
							}
						} else {
							/* this should NOT happen */
						}
					}
				}
			}
			break;
		default:
			if (is_binop(succ)) {
				/* We have reached a BinOp and we must increase the
				   weight with the binop_weight. If the other operand of the
				   BinOp is a constant we increase the weight with const_binop_weight
				   and call the function recursively. */
				ir_node *op;
				if (get_binop_left(succ) == arg)
					op = get_binop_right(succ);
				else
					op = get_binop_left(succ);

				if (is_irn_constlike(op)) {
					weight += const_binop_weight;
					weight += calc_method_param_weight(succ);
				} else
					weight += binop_weight;
			} else if (get_irn_arity(succ) == 1) {
				/* a unary operation: the result is a function of the argument
				   alone, so score it like a constant binop and recurse */
				weight += const_binop_weight;
				weight += calc_method_param_weight(succ);
			}
			break;
		}
	}
	/* NOTE(review): clearing the link looks like a leftover from an older
	   link-based visited scheme — the function marks via mark_irn_visited()
	   above and never reads the link; confirm whether this can be removed. */
	set_irn_link(arg, NULL);
	return weight;
}