void cg_set_call_callee_arr(ir_node *node, size_t n, ir_entity **arr)
{
	assert(is_Call(node));
	if (node->attr.call.callee_arr == NULL || cg_get_call_n_callees(node) != n) {
		ir_graph *const irg = get_irn_irg(node);
		node->attr.call.callee_arr = NEW_ARR_D(ir_entity*, get_irg_obstack(irg), n);
	}
	/* copy the given callees into the node's array */
	memcpy(node->attr.call.callee_arr, arr, n * sizeof(arr[0]));
}
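/* Hypothetical usage (not from the original source): an analysis that has
 * resolved a Call to exactly one target entity could record it like this.
 * The setter copies the array, so a stack-allocated buffer is fine. */
static void example_set_single_callee(ir_node *call, ir_entity *target)
{
	ir_entity *callees[1] = { target };
	cg_set_call_callee_arr(call, 1, callees);
}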
/**
 * A vcg attribute hook.
 */
static int stat_dag_mark_hook(FILE *F, const ir_node *n, const ir_node *l)
{
	static const char *colors[] = {
		"purple", "pink", "lightblue", "orange",
		"khaki", "orchid", "lilac", "turquoise"
	};
	dag_entry_t *entry;

	/* do not count Bad / NoMem */
	if (l) {
		if (is_NoMem(l) || is_Bad(l))
			return DEFAULT_RET;

		/* check for additional options */
		if (mark_options & FIRMSTAT_LOAD_IS_LEAVE && is_Load(n))
			return DEFAULT_RET;

		if (mark_options & FIRMSTAT_CALL_IS_LEAVE && is_Call(n))
			return DEFAULT_RET;
	}

	entry = get_irn_dag_entry(n);
	if (! entry)
		return DEFAULT_RET;

	fprintf(F, "color: %s info3: \"DAG id: %u\"", colors[entry->id & 7], entry->id);

	/* I know the color! */
	return COLOR_RET;
}
/**
 * Pre-walker called by compute_callgraph(); analyses all Call nodes.
 */
static void ana_Call(ir_node *n, void *env)
{
	(void)env;
	if (!is_Call(n))
		return;

	ir_graph *irg = get_irn_irg(n);
	for (size_t i = 0, n_callees = cg_get_call_n_callees(n); i < n_callees; ++i) {
		ir_entity *callee_e = cg_get_call_callee(n, i);
		ir_graph  *callee   = get_entity_linktime_irg(callee_e);
		if (callee) {
			cg_callee_entry buf;
			buf.irg = callee;

			pset_insert((pset *)callee->callers, irg, hash_ptr(irg));
			cg_callee_entry *found = (cg_callee_entry*)pset_find((pset *)irg->callees, &buf, hash_ptr(callee));
			if (found) {
				/* add Call node to list, compute new nesting. */
				ir_node **arr = found->call_list;
				ARR_APP1(ir_node *, arr, n);
				found->call_list = arr;
			} else {
				/* new entry: add the Call node and init the nesting. */
				found = OALLOC(get_irg_obstack(irg), cg_callee_entry);
				found->irg          = callee;
				found->call_list    = NEW_ARR_F(ir_node *, 1);
				found->call_list[0] = n;
				found->max_depth    = 0;

				pset_insert((pset *)irg->callees, found, hash_ptr(callee));
			}
			unsigned depth = get_loop_depth(get_irn_loop(get_nodes_block(n)));
			found->max_depth = MAX(found->max_depth, depth);
		}
	}
}
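/* Usage sketch (hypothetical driver, not part of the original source):
 * compute_callgraph() installs ana_Call as a pre-walker over all graphs.
 * It presumes valid callee information on every Call node (e.g. from a
 * preceding cgana() run) and loop information for the max_depth computation. */
static void example_build_callgraph(void)
{
	compute_callgraph();  /* ana_Call fills irg->callees and callee->callers */
	/* ... consult the callgraph here ... */
	free_callgraph();
}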
static void fix_address_pic_elf(ir_node *const node, void *const data)
{
	(void)data;
	foreach_irn_in(node, i, pred) {
		if (!is_Address(pred))
			continue;

		ir_entity *const entity = get_Address_entity(pred);
		if (is_tls_entity(entity))
			continue;

		ir_graph *const irg         = get_irn_irg(node);
		bool      const ext_visible = is_externally_visible(entity);
		ir_node        *res;
		if (i == n_Call_ptr && is_Call(node)) {
			/* We can call compilation-unit local functions directly;
			 * everything else goes through the PLT. */
			x86_immediate_kind_t const reloc = ext_visible ? X86_IMM_PLT : X86_IMM_PCREL;
			res = be_new_Relocation(irg, reloc, entity, mode_P);
		} else if (!ext_visible) {
			res = be_new_Relocation(irg, X86_IMM_PCREL, entity, mode_P);
		} else {
			res = create_gotpcrel_load(irg, entity);
		}
		set_irn_n(node, i, res);
	}
}
static void fix_address_pic_mach_o(ir_node *const node, void *const data)
{
	(void)data;
	foreach_irn_in(node, i, pred) {
		if (!is_Address(pred))
			continue;

		ir_entity *const entity = get_Address_entity(pred);
		if (is_tls_entity(entity))
			continue;

		ir_graph *const irg = get_irn_irg(node);
		ir_node        *res;
		if (i == n_Call_ptr && is_Call(node)) {
			// Somehow we can always call PC relative. Are there trampolines
			// involved?
			res = be_new_Relocation(irg, X86_IMM_PCREL, entity, mode_P);
		} else if (entity_has_definition(entity)
		        && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE)) {
			res = be_new_Relocation(irg, X86_IMM_PCREL, entity, mode_P);
		} else {
			res = create_gotpcrel_load(irg, entity);
		}
		set_irn_n(node, i, res);
	}
}
static void check_omit_fp(ir_node *node, void *env)
{
	/* omit-fp is not possible if:
	 *  - we have allocations on the stack
	 *  - we have calls (with the exception of tail-calls once we support them)
	 */
	if (is_Alloc(node) || is_Free(node) || is_Call(node)) {
		bool *can_omit_fp = (bool*)env;
		*can_omit_fp = false;
	}
}
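/* Minimal sketch of the intended use (assumed, not from the original source):
 * walk a graph with check_omit_fp as pre-walker; any Alloc, Free or Call node
 * clears the flag. */
static bool graph_can_omit_fp(ir_graph *irg)
{
	bool can_omit_fp = true;
	irg_walk_graph(irg, check_omit_fp, NULL, &can_omit_fp);
	return can_omit_fp;
}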
/** Patches Addresses to work in position independent code. */
static void fix_pic_addresses(ir_node *const node, void *const data)
{
	(void)data;
	ir_graph      *const irg = get_irn_irg(node);
	be_main_env_t *const be  = be_get_irg_main_env(irg);
	foreach_irn_in(node, i, pred) {
		if (!is_Address(pred))
			continue;

		ir_node         *res;
		ir_entity *const entity = get_Address_entity(pred);
		dbg_info  *const dbgi   = get_irn_dbg_info(pred);
		if (i == n_Call_ptr && is_Call(node)) {
			/* Calls can jump to relative addresses, so we can directly jump
			 * to the (relatively) known call address or the trampoline */
			if (can_address_relative(entity))
				continue;

			ir_entity *const trampoline = get_trampoline(be, entity);
			res = new_rd_Address(dbgi, irg, trampoline);
		} else if (get_entity_type(entity) == get_code_type()) {
			/* Block labels can always be addressed directly. */
			continue;
		} else {
			/* Everything else is accessed relative to EIP. */
			ir_node *const block    = get_nodes_block(pred);
			ir_mode *const mode     = get_irn_mode(pred);
			ir_node *const pic_base = ia32_get_pic_base(irg);
			if (can_address_relative(entity)) {
				/* All ok now for locally constructed stuff. */
				res = new_rd_Add(dbgi, block, pic_base, pred, mode);
				/* Make sure the walker doesn't visit this add again. */
				mark_irn_visited(res);
			} else {
				/* Get entry from pic symbol segment. */
				ir_entity *const pic_symbol  = get_pic_symbol(be, entity);
				ir_node   *const pic_address = new_rd_Address(dbgi, irg, pic_symbol);
				ir_node   *const add         = new_rd_Add(dbgi, block, pic_base, pic_address, mode);
				mark_irn_visited(add);

				/* We need an extra indirection for global data outside our
				 * current module. The loads are always safe and can therefore
				 * float and need no memory input. */
				ir_type *const type  = get_entity_type(entity);
				ir_node *const nomem = get_irg_no_mem(irg);
				ir_node *const load  = new_rd_Load(dbgi, block, nomem, add, mode, type, cons_floats);
				res = new_r_Proj(load, mode, pn_Load_res);
			}
		}
		set_irn_n(node, i, res);
	}
}
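/* Usage sketch (assumed, not from the original source): the pass is a plain
 * pre-walker; the mark_irn_visited() calls above keep the ongoing walk from
 * revisiting the Add and Load nodes it creates. */
static void fix_pic_addresses_in_graph(ir_graph *irg)
{
	irg_walk_graph(irg, fix_pic_addresses, NULL, NULL);
}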
ir_entity *detect_call(ir_node *call)
{
	assert(is_Call(call));
	ir_node *callee = get_irn_n(call, 1);
	if (is_Address(callee)) {
		ir_entity *entity = get_Address_entity(callee);
		if (entity == gcj_init_entity) {
			assert(get_irn_arity(call) == 3);
			ir_node *arg = get_irn_n(call, 2);
			assert(is_Address(arg));
			ir_entity *rtti  = get_Address_entity(arg);
			ir_type   *klass = cpmap_find(&rtti2class, rtti);
			assert(klass);
			ir_entity *init_method = cpmap_find(&class2init, klass);
			//assert(init_method); // _Jv_InitClass calls can be there although class has no clinit
			return init_method;
		}
		// else if (entity == ...)
	} else {
		assert(false);
	}
	return NULL;
}
size_t cg_get_call_n_callees(const ir_node *node)
{
	assert(is_Call(node) && node->attr.call.callee_arr);
	return ARR_LEN(node->attr.call.callee_arr);
}
int cg_call_has_callees(const ir_node *node)
{
	assert(is_Call(node));
	return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none)
	        && (node->attr.call.callee_arr != NULL));
}
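/* Illustrative helper (hypothetical, printf-based; assumes <stdio.h>):
 * iterate the statically known callees of a Call node. cg_call_has_callees()
 * guards the access, since the callee array is only valid once callee
 * information has been computed for the graph. */
static void print_call_callees(const ir_node *call)
{
	if (!cg_call_has_callees(call))
		return;
	for (size_t i = 0, n = cg_get_call_n_callees(call); i < n; ++i) {
		ir_entity *callee = cg_get_call_callee(call, i);
		printf("callee %zu: %s\n", i, get_entity_name(callee));
	}
}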
static void infer_typeinfo_walker(ir_node *irn, void *env)
{
	bool *changed = (bool*)env;

	// A node's type needs to be calculated only once.
	if (get_irn_typeinfo_type(irn) != initial_type)
		return;

	if (is_Alloc(irn)) {
		// This one is easy: we know the exact dynamic type.
		ir_type *type = get_Alloc_type(irn);
		if (! is_Class_type(type))
			return;
		set_irn_typeinfo_type(irn, type);
		*changed = true;
	} else if (is_Sel(irn) || is_SymConst_addr_ent(irn)) {
		// The type we determine here is the one of the entity we select or reference.
		// The transform_Sel method below will use the type incoming on the Sel_ptr input.
		ir_type *type = get_Sel_or_SymConst_type(irn);
		if (! type)
			return;
		ir_type *one_alive = get_alive_subclass(type);
		if (! one_alive)
			return;
		set_irn_typeinfo_type(irn, one_alive);
		*changed = true;
	} else if (is_Call(irn)) {
		// The dynamic type of the call result is the return type of the called entity.
		ir_node *call_pred = get_Call_ptr(irn);
		ir_type *pred_type = get_irn_typeinfo_type(call_pred);
		if (pred_type == initial_type)
			return;
		set_irn_typeinfo_type(irn, pred_type);
		*changed = true;
	} else if (is_Load(irn)) {
		// The dynamic type of the Load result is the type of the loaded entity.
		ir_node *load_pred = get_Load_ptr(irn);
		if (! is_Sel(load_pred) && !is_SymConst_addr_ent(load_pred))
			return;
		ir_type *pred_type = get_irn_typeinfo_type(load_pred);
		if (pred_type == initial_type)
			return;
		set_irn_typeinfo_type(irn, pred_type);
		*changed = true;
	} else if (is_Proj(irn)) {
		// Types have to be propagated through Proj nodes (XXX: and also through Cast and Confirm).
		ir_mode *pmode = get_irn_mode(irn);
		if (pmode != mode_P)
			return;
		ir_node *proj_pred = get_Proj_pred(irn);
		if (is_Proj(proj_pred) && get_irn_mode(proj_pred) == mode_T
		    && get_Proj_proj(proj_pred) == pn_Call_T_result
		    && is_Call(get_Proj_pred(proj_pred)))
			proj_pred = get_Proj_pred(proj_pred); // skip the result tuple
		ir_type *pred_type = get_irn_typeinfo_type(proj_pred);
		if (pred_type == initial_type)
			return;
		set_irn_typeinfo_type(irn, pred_type);
		*changed = true;
	} else if (is_Phi(irn)) {
		// Phi nodes are a special case because the incoming type information must be merged.
		// A Phi node's type is unknown until all inputs are known to be the same dynamic type.
		ir_mode *pmode = get_irn_mode(irn);
		if (pmode != mode_P)
			return;
		int      phi_preds = get_Phi_n_preds(irn);
		ir_type *last      = NULL;
		for (int p = 0; p < phi_preds; p++) {
			ir_node *pred      = get_Phi_pred(irn, p);
			ir_type *pred_type = get_irn_typeinfo_type(pred);
			if (pred_type == initial_type)
				return;
			if (p && last != pred_type)
				return;
			last = pred_type;
		}
		set_irn_typeinfo_type(irn, last);
		*changed = true; // propagate the newly merged type in a further pass
	}
}
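/* Sketch of the fixed-point driver this walker is written for (assumed, not
 * from the original source; presumes typeinfo has been initialized, e.g. via
 * init_irtypeinfo()): since a node's type may become known only after a
 * predecessor's, the walk is repeated until no node's typeinfo changes. */
static void infer_typeinfo_fixpoint(ir_graph *irg)
{
	bool changed;
	do {
		changed = false;
		irg_walk_graph(irg, NULL, infer_typeinfo_walker, &changed);
	} while (changed);
}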
Worklist::Worklist(ir_graph* functionGraph, GraphHandler& handler)
	: functionGraph(functionGraph), handler(handler)
{
	typedef void (*ir_func)(ir_node*, void*);

	struct envMembers
	{
		std::queue<ir_node*>* pQueue;
		std::unordered_map<ir_node*, bool>* pIsQueued;
	};

	envMembers envInstance;
	envInstance.pQueue = &this->worklist;
	envInstance.pIsQueued = &this->isQueued;

	ir_func addPhis = [](ir_node* node, void* env)
	{
		if (is_Phi(node))
		{
			auto envInstance = (envMembers*)env;
			set_irn_link(node, (void*)tarval_unknown);
			envInstance->pQueue->push(node);
			(*envInstance->pIsQueued)[node] = true;
		}
	};

	ir_func addToWorklist = [](ir_node* node, void* env)
	{
		if (is_Phi(node))
			return;

		auto envInstance = (envMembers*)env;
		ir_tarval* tarval;

		auto isBadNode = [&](Node node) -> bool
		{
			if (is_Call(node) || is_Load(node) || is_Start(node))
				return true;

			/*else if (Node(node).getChildCount() > 0)
			{
				for (Node child : Node(node).getChildren())
					if (child.getTarval() == tarval_bad)
						return true;
			}*/
			return false;
		};

		// TODO: Support other modes such as Bu, Lu
		if (is_Const(node) && Node(node).getTarval().isNumeric())
			tarval = get_Const_tarval(node);
		else if (isBadNode(Node(node)))
			tarval = tarval_bad;
		else
			tarval = tarval_unknown;

		set_irn_link(node, (void*)tarval);
		envInstance->pQueue->push(node);
		(*envInstance->pIsQueued)[node] = true;
	};

	walk_topological(functionGraph, addPhis, (void*)&envInstance);
	walk_topological(functionGraph, addToWorklist, (void*)&envInstance);
}
/**
 * Pre-walker for connecting DAGs and counting.
 */
static void connect_dags(ir_node *node, void *env)
{
	dag_env_t   *dag_env = (dag_env_t*)env;
	int          i, arity;
	ir_node     *block;
	dag_entry_t *entry;
	ir_mode     *mode;

	if (is_Block(node))
		return;

	block = get_nodes_block(node);

	/* ignore start and end blocks */
	ir_graph *const irg = get_Block_irg(block);
	if (block == get_irg_start_block(irg) || block == get_irg_end_block(irg))
		return;

	/* ignore Phi nodes */
	if (is_Phi(node))
		return;

	if (dag_env->options & FIRMSTAT_ARGS_ARE_ROOTS && is_arg(node))
		return;

	mode = get_irn_mode(node);
	if (mode == mode_X || mode == mode_M) {
		/* do NOT count mode_X and mode_M nodes */
		return;
	}

	/* if this option is set, Loads are always leaves */
	if (dag_env->options & FIRMSTAT_LOAD_IS_LEAVE && is_Load(node))
		return;

	if (dag_env->options & FIRMSTAT_CALL_IS_LEAVE && is_Call(node))
		return;

	entry = get_irn_dag_entry(node);
	if (! entry) {
		/* found an unassigned node, maybe a new root */
		entry = new_dag_entry(dag_env, node);
	}

	/* put the predecessors into the same DAG as the current node */
	for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
		ir_node *prev = get_irn_n(node, i);
		ir_mode *mode = get_irn_mode(prev);

		if (is_Phi(prev))
			continue;

		if (mode == mode_X || mode == mode_M)
			continue;

		/*
		 * copy constants, if requested, into the DAGs;
		 * beware: do NOT add a link, as this will result in
		 * wrong intersections
		 */
		if (dag_env->options & FIRMSTAT_COPY_CONSTANTS) {
			if (is_irn_constlike(prev)) {
				++entry->num_nodes;
				++entry->num_inner_nodes;
			}
		}

		/* only nodes from the same block go into the DAG */
		if (get_nodes_block(prev) == block) {
			dag_entry_t *prev_entry = get_irn_dag_entry(prev);

			if (! prev_entry) {
				/* unassigned node, put it into the same DAG */
				set_irn_dag_entry(prev, entry);
				++entry->num_nodes;
				++entry->num_inner_nodes;
			} else if (prev_entry == entry) {
				/* We found a node that is already assigned to this DAG.
				   This DAG is not a tree. */
				entry->is_tree = 0;
			} else {
				/* two DAGs intersect: copy the data to one of them
				   and kill the other */
				entry->num_roots       += prev_entry->num_roots;
				entry->num_nodes       += prev_entry->num_nodes;
				entry->num_inner_nodes += prev_entry->num_inner_nodes;
				entry->is_tree         &= prev_entry->is_tree;

				--dag_env->num_of_dags;
				prev_entry->is_dead = 1;
				prev_entry->link    = entry;
			}
		}
	}
}