/**
 * Generates IR that allocates a new array on the gcj runtime heap.
 *
 * Primitive element types go through the prim-array runtime entity,
 * everything else through the object-array runtime entity (note the
 * differing argument orders of the two runtime calls). Afterwards the
 * result's vptr is marked as set so later phases know its dynamic type.
 *
 * @param eltype  the element type of the array
 * @param count   node computing the number of elements
 * @return        node representing the reference to the new array
 */
ir_node *gcji_allocate_array(ir_type *eltype, ir_node *count)
{
	ir_node *jclass = gcji_get_runtime_classinfo(eltype);
	ir_node *res;
	ir_node *new_mem;
	if (is_Primitive_type(eltype)) {
		ir_node *addr      = new_Address(gcj_new_prim_array_entity);
		ir_node *args[]    = { jclass, count };
		ir_type *call_type = get_entity_type(gcj_new_prim_array_entity);
		ir_node *mem       = get_store();
		ir_node *call      = new_Call(mem, addr, ARRAY_SIZE(args), args,
		                              call_type);
		ir_node *ress      = new_Proj(call, mode_T, pn_Call_T_result);
		new_mem = new_Proj(call, mode_M, pn_Call_M);
		/* consistency fix: use new_Proj like every other Proj in this
		 * function (was new_r_Proj in the original) */
		res = new_Proj(ress, mode_reference, 0);
	} else {
		ir_node *addr      = new_Address(gcj_new_object_array_entity);
		ir_node *null      = new_Const(get_mode_null(mode_reference));
		/* note: object-array runtime call takes (count, class, init) */
		ir_node *args[]    = { count, jclass, null };
		ir_type *call_type = get_entity_type(gcj_new_object_array_entity);
		ir_node *mem       = get_store();
		ir_node *call      = new_Call(mem, addr, ARRAY_SIZE(args), args,
		                              call_type);
		ir_node *ress      = new_Proj(call, mode_T, pn_Call_T_result);
		new_mem = new_Proj(call, mode_M, pn_Call_M);
		res = new_Proj(ress, mode_reference, 0);
	}
	/* record that the array object's vptr is initialized now */
	ir_node *assure_vptr = new_VptrIsSet(new_mem, res, type_jarray);
	ir_node *new_mem2    = new_Proj(assure_vptr, mode_M, pn_VptrIsSet_M);
	ir_node *res2        = new_Proj(assure_vptr, mode_reference,
	                               pn_VptrIsSet_res);
	set_store(new_mem2);
	return res2;
}
ir_node *get_atomic_ent_value(const ir_entity *entity) { ir_initializer_t *initializer = get_entity_initializer(entity); assert(is_atomic_entity(entity)); if (initializer == NULL) { ir_type *type = get_entity_type(entity); return new_r_Unknown(get_const_code_irg(), get_type_mode(type)); } switch (get_initializer_kind(initializer)) { case IR_INITIALIZER_NULL: { ir_type *type = get_entity_type(entity); ir_mode *mode = get_type_mode(type); return new_r_Const(get_const_code_irg(), get_mode_null(mode)); } case IR_INITIALIZER_TARVAL: { ir_tarval *tv = get_initializer_tarval_value(initializer); return new_r_Const(get_const_code_irg(), tv); } case IR_INITIALIZER_CONST: return get_initializer_const_value(initializer); case IR_INITIALIZER_COMPOUND: panic("compound initializer in atomic entity not allowed (%+F)", entity); } panic("invalid initializer kind (%+F)", entity); }
/** patches Addresses to work in position independent code */
static void fix_pic_addresses(ir_node *const node, void *const data)
{
	(void)data;
	ir_graph      *const irg = get_irn_irg(node);
	be_main_env_t *const be  = be_get_irg_main_env(irg);
	/* inspect every operand of the node; only Address operands need
	 * rewriting for PIC */
	foreach_irn_in(node, i, pred) {
		if (!is_Address(pred))
			continue;
		ir_node *res;
		ir_entity *const entity = get_Address_entity(pred);
		dbg_info  *const dbgi   = get_irn_dbg_info(pred);
		if (i == n_Call_ptr && is_Call(node)) {
			/* Calls can jump to relative addresses, so we can directly jump to
			 * the (relatively) known call address or the trampoline */
			if (can_address_relative(entity))
				continue;
			ir_entity *const trampoline = get_trampoline(be, entity);
			res = new_rd_Address(dbgi, irg, trampoline);
		} else if (get_entity_type(entity) == get_code_type()) {
			/* Block labels can always be addressed directly. */
			continue;
		} else {
			/* Everything else is accessed relative to EIP. */
			ir_node *const block    = get_nodes_block(pred);
			ir_mode *const mode     = get_irn_mode(pred);
			ir_node *const pic_base = ia32_get_pic_base(irg);
			if (can_address_relative(entity)) {
				/* All ok now for locally constructed stuff. */
				res = new_rd_Add(dbgi, block, pic_base, pred, mode);
				/* Make sure the walker doesn't visit this add again. */
				mark_irn_visited(res);
			} else {
				/* Get entry from pic symbol segment. */
				ir_entity *const pic_symbol  = get_pic_symbol(be, entity);
				ir_node   *const pic_address = new_rd_Address(dbgi, irg,
				                                              pic_symbol);
				ir_node   *const add = new_rd_Add(dbgi, block, pic_base,
				                                  pic_address, mode);
				mark_irn_visited(add);
				/* We need an extra indirection for global data outside our current
				 * module. The loads are always safe and can therefore float and
				 * need no memory input */
				ir_type *const type  = get_entity_type(entity);
				ir_node *const nomem = get_irg_no_mem(irg);
				ir_node *const load  = new_rd_Load(dbgi, block, nomem, add,
				                                   mode, type, cons_floats);
				res = new_r_Proj(load, mode, pn_Load_res);
			}
		}
		/* replace the Address operand by the PIC-safe replacement */
		set_irn_n(node, i, res);
	}
}
/* Decide the calling convention for the graph's function entity and lay
 * out its parameter entities. If the variadic fixups exchanged the
 * entity's method type, the decision is redone on the new type. */
calling_convention_t *sparc_prepare_calling_convention(ir_graph *const irg)
{
	ir_entity *const fun_entity  = get_irg_entity(irg);
	ir_type   *const non_lowered = get_entity_type(fun_entity);
	calling_convention_t *result
		= sparc_decide_calling_convention(get_entity_type(fun_entity), irg);
	if (sparc_variadic_fixups(irg, result)) {
		/* the entity type changed: decide again based on the new type */
		sparc_free_calling_convention(result);
		result = sparc_decide_calling_convention(get_entity_type(fun_entity),
		                                         irg);
	}
	sparc_layout_param_entities(irg, result, non_lowered);
	return result;
}
/**
 * Calculate a weight for each argument of an entity.
 *
 * @param ent  The entity of the ir_graph.
 */
static void analyze_method_params_weight(ir_entity *ent)
{
	/* allocate a new array. currently used as 'analysed' flag */
	ir_type *mtp     = get_entity_type(ent);
	size_t   nparams = get_method_n_params(mtp);
	ent->attr.mtd_attr.param_weight = NEW_ARR_F(unsigned, nparams);

	/* If the method has no parameters we have nothing to do.
	 * (nparams is a size_t, so compare against 0 with == instead of the
	 * misleading <= 0) */
	if (nparams == 0)
		return;

	/* First we initialize the parameter weights with 0. */
	for (size_t i = nparams; i-- > 0; )
		ent->attr.mtd_attr.param_weight[i] = null_weight;

	ir_graph *irg = get_entity_irg(ent);
	if (irg == NULL) {
		/* no graph, no better info */
		return;
	}

	/* Call algorithm that computes the out edges */
	assure_irg_outs(irg);

	/* accumulate a weight per argument Proj of the Args node */
	ir_node *irg_args = get_irg_args(irg);
	for (int i = get_irn_n_outs(irg_args); i-- > 0; ) {
		ir_node *arg     = get_irn_out(irg_args, i);
		long     proj_nr = get_Proj_proj(arg);
		ent->attr.mtd_attr.param_weight[proj_nr]
			+= calc_method_param_weight(arg);
	}
}
/**
 * Generates IR that resolves an interface method at runtime via the gcj
 * lookup-interface runtime entity.
 *
 * @param objptr  reference to the receiver object
 * @param iface   the interface type declaring the method
 * @param method  the interface method entity to resolve
 * @param irg     graph the nodes are created in
 * @param block   block the nodes are placed in
 * @param mem     in/out: memory state threaded through the loads/call
 * @return        node representing the resolved method address
 */
ir_node *gcji_lookup_interface(ir_node *objptr, ir_type *iface,
                               ir_entity *method, ir_graph *irg,
                               ir_node *block, ir_node **mem)
{
	ir_node   *cur_mem = *mem;

	// we need the reference to the object's class$ field
	// first, dereference the vptr in order to get the vtable address.
	ir_entity *vptr_entity = get_vptr_entity();
	ir_type   *vptr_type   = get_entity_type(vptr_entity);
	ir_node   *vptr_addr   = new_r_Member(block, objptr, vptr_entity);
	ir_node   *vptr_load   = new_r_Load(block, cur_mem, vptr_addr,
	                                    mode_reference, vptr_type, cons_none);
	ir_node   *vtable_addr = new_r_Proj(vptr_load, mode_reference,
	                                    pn_Load_res);
	cur_mem                = new_r_Proj(vptr_load, mode_M, pn_Load_M);

	// second, dereference vtable_addr (it points to the slot where the
	// address of the class$ field is stored).
	ir_node   *cd_load     = new_r_Load(block, cur_mem, vtable_addr,
	                                    mode_reference, vptr_type, cons_none);
	ir_node   *cd_ref      = new_r_Proj(cd_load, mode_reference, pn_Load_res);
	cur_mem                = new_r_Proj(cd_load, mode_M, pn_Load_M);

	/* look up the method's name and descriptor constants in the class's
	 * constant pool and emit them as utf8 constants */
	class_t  *linked_class  = (class_t*) oo_get_type_link(iface);
	method_t *linked_method = (method_t*) oo_get_entity_link(method);
	assert(linked_class && linked_method);

	constant_t *name_const    = linked_class->constants[linked_method->name_index];
	ir_entity  *name_const_ent= gcji_emit_utf8_const(name_const, 1);
	ir_node    *name_ref      = new_r_Address(irg, name_const_ent);

	constant_t *desc_const
		= linked_class->constants[linked_method->descriptor_index];
	ir_entity  *desc_const_ent= gcji_emit_utf8_const(desc_const, 1);
	ir_node    *desc_ref      = new_r_Address(irg, desc_const_ent);

	/* call the runtime lookup helper with (class, name, descriptor) */
	ir_node *callee    = new_r_Address(irg, gcj_lookup_interface_entity);
	ir_node *args[3]   = { cd_ref, name_ref, desc_ref };
	ir_type *call_type = get_entity_type(gcj_lookup_interface_entity);
	ir_node *call      = new_r_Call(block, cur_mem, callee, 3, args,
	                                call_type);
	cur_mem            = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node *ress      = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node *res       = new_r_Proj(ress, mode_reference, 0);

	*mem = cur_mem;
	return res;
}
/** Generates IR calling the gcj checkcast runtime entity to verify that
 *  objptr can be cast to classtype. */
void gcji_checkcast(ir_type *classtype, ir_node *objptr)
{
	ir_node *jclass    = gcji_get_runtime_classinfo(classtype);
	ir_node *callee    = new_Address(gcj_checkcast_entity);
	ir_type *call_type = get_entity_type(gcj_checkcast_entity);
	ir_node *in[]      = { jclass, objptr };
	ir_node *call      = new_Call(get_store(), callee, ARRAY_SIZE(in), in,
	                              call_type);
	set_store(new_Proj(call, mode_M, pn_Call_M));
}
/* Build a floating, memory-less load of the entity's GOTPCREL slot and
 * return the loaded pointer value. */
static ir_node *create_gotpcrel_load(ir_graph *irg, ir_entity *const entity)
{
	ir_node *const reloc = be_new_Relocation(irg, X86_IMM_GOTPCREL, entity,
	                                         mode_P);
	ir_type *const tp    = get_entity_type(entity);
	ir_node *const load  = new_rd_Load(NULL, get_irg_start_block(irg),
	                                   get_irg_no_mem(irg), reloc, mode_P,
	                                   tp, cons_floats);
	return new_r_Proj(load, mode_P, pn_Load_res);
}
/* Set up per-node bootstrap state and schedule the first hello callback. */
int bootstrap(call_t *c)
{
	struct nodedata *nodedata = get_node_private_data(c);
	entityid_t *down = get_entity_links_down(c);
	call_t c0 = {down[0], c->node, c->entity};
	uint64_t schedule = get_time() + nodedata->h_start
	                  + get_random_double() * nodedata->h_period;

	/* get overhead: only routing/mac entities below contribute a header */
	if (get_entity_type(&c0) == MODELTYPE_ROUTING
	    || get_entity_type(&c0) == MODELTYPE_MAC) {
		nodedata->overhead = GET_HEADER_SIZE(&c0);
	} else {
		nodedata->overhead = 0;
	}

	/* scheduler first hello (h_nbr == -1 presumably means "unlimited";
	 * NOTE(review): confirm against the hello_callback counter logic) */
	if (nodedata->h_nbr == -1 || nodedata->h_nbr > 0) {
		scheduler_add_callback(schedule, c, hello_callback, NULL);
	}
	return 0;
}
/**
 * Create a trampoline entity for the given method.
 */
static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
{
	ident     *const method_ld = get_entity_ld_ident(method);
	ident     *const stub_id   = new_id_fmt("%s$stub", method_ld);
	ir_entity *const stub      = new_entity(be->pic_trampolines_type,
	                                        method_ld,
	                                        get_entity_type(method));
	set_entity_ld_ident(stub, stub_id);
	set_entity_visibility(stub, ir_visibility_private);
	return stub;
}
/**
 * Check whether each reference-mode argument of the entity's graph is
 * read, written or both, and store the result in param_access.
 *
 * @param ent  The entity to analyze.
 */
static void analyze_ent_args(ir_entity *ent)
{
	ir_type *mtp     = get_entity_type(ent);
	size_t   nparams = get_method_n_params(mtp);
	ent->attr.mtd_attr.param_access = NEW_ARR_F(ptr_access_kind, nparams);

	/* If the method has no parameters we have nothing to do.
	 * (nparams is a size_t, so compare with == 0 rather than <= 0) */
	if (nparams == 0)
		return;

	/* we have not yet analyzed the graph, set ALL access for pointer args */
	for (size_t i = nparams; i-- > 0; ) {
		ir_type *type = get_method_param_type(mtp, i);
		ent->attr.mtd_attr.param_access[i]
			= is_Pointer_type(type) ? ptr_access_all : ptr_access_none;
	}

	ir_graph *irg = get_entity_irg(ent);
	if (irg == NULL) {
		/* no graph, no better info */
		return;
	}

	assure_irg_outs(irg);
	ir_node *irg_args = get_irg_args(irg);

	/* An array to save the information for each argument with mode
	 * reference. */
	ptr_access_kind *rw_info;
	NEW_ARR_A(ptr_access_kind, rw_info, nparams);

	/* We initialize the elements with the none state. */
	for (size_t i = nparams; i-- > 0; )
		rw_info[i] = ptr_access_none;

	/* search for arguments with mode reference to analyze them. */
	for (int i = get_irn_n_outs(irg_args); i-- > 0; ) {
		ir_node *arg      = get_irn_out(irg_args, i);
		ir_mode *arg_mode = get_irn_mode(arg);
		long     proj_nr  = get_Proj_proj(arg);
		if (mode_is_reference(arg_mode))
			rw_info[proj_nr] |= analyze_arg(arg, rw_info[proj_nr]);
	}

	/* copy the temporary info */
	memcpy(ent->attr.mtd_attr.param_access, rw_info,
	       nparams * sizeof(ent->attr.mtd_attr.param_access[0]));
}
/* Create the $non_lazy_ptr indirection entity (a pointer to the original
 * entity) in the pic symbols segment. */
static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
{
	ident     *const orig_id = get_entity_ld_ident(entity);
	ident     *const ptr_id  = new_id_fmt("%s$non_lazy_ptr", orig_id);
	ir_type   *const ptr_tp  = new_type_pointer(get_entity_type(entity));
	ir_entity *const sym     = new_entity(be->pic_symbols_type, orig_id,
	                                      ptr_tp);
	set_entity_ld_ident(sym, ptr_id);
	set_entity_visibility(sym, ir_visibility_private);
	return sym;
}
/** Generates IR calling the gcj class-init runtime entity for the given
 *  class type. */
void gcji_class_init(ir_type *type)
{
	assert(is_Class_type(type));
	ir_node *callee    = new_Address(gcj_init_entity);
	ir_node *jclass    = gcji_get_runtime_classinfo(type);
	ir_node *in[]      = { jclass };
	ir_type *call_type = get_entity_type(gcj_init_entity);
	ir_node *call      = new_Call(get_store(), callee, ARRAY_SIZE(in), in,
	                              call_type);
	set_store(new_Proj(call, mode_M, pn_Call_M));
}
/**
 * Generates IR calling the gcj get-array-class runtime entity for the
 * class referenced by array_class_ref.
 *
 * @param block            block to place the call in
 * @param mem              in/out: memory state
 * @param array_class_ref  reference to the element class
 * @return                 node representing the array class reference
 */
static ir_node *gcji_get_arrayclass(ir_node *block, ir_node **mem,
                                    ir_node *array_class_ref)
{
	ir_graph *irg       = get_irn_irg(block);
	ir_node  *addr      = new_r_Address(irg, gcj_get_array_class_entity);
	ir_node  *null      = new_r_Const(irg, get_mode_null(mode_reference));
	ir_node  *args[]    = { array_class_ref, null };
	ir_type  *call_type = get_entity_type(gcj_get_array_class_entity);
	ir_node  *call      = new_r_Call(block, *mem, addr, ARRAY_SIZE(args),
	                                 args, call_type);
	/* consistency fix: use the explicit-graph new_r_Proj constructors;
	 * the original mixed implicit new_Proj (which uses the "current"
	 * graph) into a function otherwise written in explicit irg/block
	 * style */
	ir_node  *new_mem   = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node  *ress      = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node  *res       = new_r_Proj(ress, mode_reference, 0);
	*mem = new_mem;
	return res;
}
/** Generates IR allocating a multi-dimensional array via the gcj
 *  new-multiarray runtime entity. */
ir_node *gcji_new_multiarray(ir_node *array_class_ref, unsigned dims,
                             ir_node **sizes)
{
	ir_node *callee    = new_Address(gcj_new_multiarray_entity);
	ir_node *dims_arr  = alloc_dims_array(dims, sizes);
	ir_node *n_dims    = new_Const_long(mode_int, dims);
	ir_node *in[]      = { array_class_ref, n_dims, dims_arr };
	ir_type *call_type = get_entity_type(gcj_new_multiarray_entity);
	ir_node *call      = new_Call(get_store(), callee, ARRAY_SIZE(in), in,
	                              call_type);
	ir_node *results   = new_Proj(call, mode_T, pn_Call_T_result);
	set_store(new_Proj(call, mode_M, pn_Call_M));
	return new_Proj(results, mode_reference, 0);
}
/* Replace a Raise node by a Call to the throw runtime entity and retarget
 * the given Proj to the call's exception output. */
void eh_lower_Raise(ir_node *raise, ir_node *proj)
{
	assert(is_Raise(raise) && is_Proj(proj));
	ir_graph *irg     = get_irn_irg(raise);
	ir_node  *block   = get_nodes_block(raise);
	ir_node  *cur_mem = get_Raise_mem(raise);
	ir_node  *callee  = new_r_SymConst(irg, throw_entity);
	ir_node  *in[1]   = { get_Raise_exo_ptr(raise) };
	ir_node  *throw   = new_r_Call(block, cur_mem, callee, 1, in,
	                               get_entity_type(throw_entity));
	ir_set_throws_exception(throw, 1);
	exchange(raise, throw);
	set_Proj_num(proj, pn_Call_X_except);
}
/**
 * Default array allocation: calloc enough zeroed memory for the elements
 * plus a length counter stored just before the first element, write the
 * length, and return a pointer past the counter.
 *
 * @param eltype  element type of the array
 * @param count   node computing the number of elements
 * @param irg     graph the nodes are created in
 * @param block   block the nodes are placed in
 * @param mem     in/out: memory state
 * @return        node representing the reference to the first element
 */
ir_node *dmemory_default_alloc_array(ir_type *eltype, ir_node *count,
                                     ir_graph *irg, ir_node *block,
                                     ir_node **mem)
{
	ir_node *cur_mem = *mem;
	unsigned count_size   = get_mode_size_bytes(default_arraylength_mode);
	/* class-typed elements are stored as pointers */
	unsigned element_size = is_Class_type(eltype)
		? get_mode_size_bytes(mode_P)
		: get_type_size_bytes(eltype); // FIXME: some langs support arrays of structs.
	/* increase element count so we have enough space for a counter
	   at the front */
	unsigned add_size = (element_size + (count_size-1)) / count_size;
	ir_node *count_u  = new_r_Conv(block, count, mode_Iu);
	ir_node *addv     = new_r_Const_long(irg, mode_Iu, add_size);
	ir_node *add1     = new_r_Add(block, count_u, addv, mode_Iu);
	ir_node *elsizev  = new_r_Const_long(irg, mode_Iu, element_size);
	/* total byte size = (count + add_size) * element_size */
	ir_node *size     = new_r_Mul(block, add1, elsizev, mode_Iu);
	unsigned addr_delta = add_size * element_size;

	/* call calloc(1, size) so the array memory is zero-initialized */
	symconst_symbol calloc_sym;
	calloc_sym.entity_p = calloc_entity;
	ir_node *callee = new_r_SymConst(irg, mode_P, calloc_sym,
	                                 symconst_addr_ent);
	ir_node *one    = new_r_Const_long(irg, mode_Iu, 1);
	ir_node *in[2]  = { one, size };
	ir_type *call_type = get_entity_type(calloc_entity);
	ir_node *call   = new_r_Call(block, cur_mem, callee, 2, in, call_type);
	cur_mem         = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node *ress   = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node *res    = new_r_Proj(ress, mode_P, 0);

	/* write length of array (stored immediately before the first element;
	 * the -4 assumes a 4-byte length field) */
	ir_node *len_value = new_r_Conv(block, count, default_arraylength_mode);
	ir_node *len_delta = new_r_Const_long(irg, mode_P, (int)addr_delta-4); //FIXME: replace magic num
	ir_node *len_addr  = new_r_Add(block, res, len_delta, mode_P);
	ir_node *store     = new_r_Store(block, cur_mem, len_addr, len_value,
	                                 cons_none);
	cur_mem            = new_r_Proj(store, mode_M, pn_Store_M);

	/* advance the result pointer past the counter slot */
	if (addr_delta > 0) {
		ir_node *delta = new_r_Const_long(irg, mode_P, (int)addr_delta);
		res = new_r_Add(block, res, delta, mode_P);
	}
	*mem = cur_mem;
	return res;
}
/* Generates IR implementing instanceof: call the gcj instanceof runtime
 * entity and compare its integer result against zero. */
static ir_node *gcji_instanceof(ir_node *objptr, ir_type *classtype,
                                ir_graph *irg, ir_node *block, ir_node **mem)
{
	ir_node *jclass    = gcji_get_runtime_classinfo_(block, mem, classtype);
	ir_node *callee    = new_r_Address(irg, gcj_instanceof_entity);
	ir_node *in[]      = { objptr, jclass };
	ir_type *call_type = get_entity_type(gcj_instanceof_entity);
	ir_node *call      = new_r_Call(block, *mem, callee, ARRAY_SIZE(in), in,
	                                call_type);
	ir_node *results   = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node *call_res  = new_r_Proj(results, mode_int, 0);
	ir_node *zero      = new_r_Const(irg, get_mode_null(mode_int));
	/* true iff the runtime call returned a non-zero value */
	ir_node *res       = new_r_Cmp(block, call_res, zero,
	                               ir_relation_less_greater);
	*mem = new_r_Proj(call, mode_M, pn_Call_M);
	return res;
}
/* Generates IR creating a runtime string object from the given bytes
 * entity via the gcj new-string runtime entity. */
ir_node *gcji_new_string(ir_entity *bytes)
{
	ir_node *callee    = new_Address(gcj_new_string_entity);
	ir_node *bytes_ref = new_Address(bytes);
	ir_node *in[]      = { bytes_ref };
	ir_type *call_type = get_entity_type(gcj_new_string_entity);
	ir_node *call      = new_Call(get_store(), callee, ARRAY_SIZE(in), in,
	                              call_type);
	ir_node *results   = new_Proj(call, mode_T, pn_Call_T_result);
	set_store(new_Proj(call, mode_M, pn_Call_M));
	// TODO: get type for java.lang.String from somewhere and use VptrIsSet
	// on the result
	return new_Proj(results, mode_reference, 0);
}
/**
 * Assign frame offsets to the parameter entities of the graph and create
 * entities for parameters that are stored but have none yet. For variadic
 * functions a $va_start anchor entity is placed at the first variadic
 * argument's offset.
 *
 * @param irg          the graph
 * @param cconv        the decided calling convention (offsets read from it)
 * @param non_lowered  the original (pre-fixup) method type
 */
static void sparc_layout_param_entities(ir_graph *const irg,
                                        calling_convention_t *const cconv,
                                        ir_type *const non_lowered)
{
	ir_entity **const param_map  = be_collect_parameter_entities(irg);
	ir_type    *const frame_type = get_irg_frame_type(irg);
	size_t      const n_params   = cconv->n_parameters;
	/* calculate offsets/create missing entities */
	for (size_t i = 0; i < n_params; ++i) {
		reg_or_stackslot_t *const param  = &cconv->parameters[i];
		ir_entity          *      entity = param_map[i];
		if (entity == NULL) {
			/* only create entities for parameters that actually get
			 * stored to the frame */
			if (!param->already_stored)
				continue;
			entity = new_parameter_entity(frame_type, i, param->type);
		}
		param->entity = entity;
		set_entity_offset(entity, param->offset);
	}
	ir_entity *const function      = get_irg_entity(irg);
	ir_type   *const function_type = get_entity_type(function);
	if (is_method_variadic(function_type)) {
		ir_type   *unknown       = get_unknown_type();
		ident     *id            = new_id_from_str("$va_start");
		ir_entity *va_start_addr = new_entity(frame_type, id, unknown);
		/* sparc_variadic_fixups() fiddled with our type, find out the
		 * original number of parameters */
		size_t const orig_n_params = get_method_n_params(non_lowered);
		long offset;
		if (orig_n_params < n_params) {
			/* first variadic argument has a materialized entity: reuse
			 * its offset */
			assert(param_map[orig_n_params] != NULL);
			offset = get_entity_offset(param_map[orig_n_params]);
		} else {
			/* otherwise the variadic area starts after the fixed
			 * parameters on the stack */
			offset = cconv->param_stack_size + SPARC_MIN_STACKSIZE;
		}
		set_entity_offset(va_start_addr, offset);
		cconv->va_start_addr = va_start_addr;
	}
	free(param_map);
}
/** Generates IR allocating a new instance of the given class type through
 *  the gcj allocation runtime entity and marks the result's vptr as set. */
ir_node *gcji_allocate_object(ir_type *type)
{
	assert(is_Class_type(type));
	ir_node *callee    = new_Address(gcj_alloc_entity);
	ir_node *jclass    = gcji_get_runtime_classinfo(type);
	ir_node *in[]      = { jclass };
	ir_type *call_type = get_entity_type(gcj_alloc_entity);
	ir_node *call      = new_Call(get_store(), callee, ARRAY_SIZE(in), in,
	                              call_type);
	ir_node *call_mem  = new_Proj(call, mode_M, pn_Call_M);
	ir_node *results   = new_Proj(call, mode_T, pn_Call_T_result);
	ir_node *obj       = new_Proj(results, mode_reference, 0);
	/* let later phases know the dynamic type of the result */
	ir_node *vptr_set  = new_VptrIsSet(call_mem, obj, type);
	set_store(new_Proj(vptr_set, mode_M, pn_VptrIsSet_M));
	return new_Proj(vptr_set, mode_reference, pn_VptrIsSet_res);
}
/**
 * Build the compound initializer describing one field for the runtime
 * reflection data: { name, type rtti, access flags, size, offset/addr }.
 *
 * @param classtype  the class owning the field
 * @param ent        the field entity
 * @return           compound initializer with the field description
 */
static ir_initializer_t *get_field_desc(ir_type *classtype, ir_entity *ent)
{
	class_t *linked_class = (class_t*) oo_get_type_link(classtype);
	field_t *linked_field = (field_t*) oo_get_entity_link(ent);
	assert(linked_class && linked_field);

	/* field name as utf8 constant from the class's constant pool */
	constant_t *name_const = linked_class->constants[linked_field->name_index];
	ir_entity  *name_ent   = gcji_emit_utf8_const(name_const, 1);

	/* rtti describing the field's type; emit a signature if none exists */
	ir_type   *field_type  = get_entity_type(ent);
	ir_entity *rtti_entity = gcji_get_rtti_entity(field_type);
	if (rtti_entity == NULL) {
		rtti_entity = emit_type_signature(field_type);
	}

	ir_graph *ccode = get_const_code_irg();
	ir_node  *bsize = new_r_Size(ccode, mode_ushort, field_type);

	/* static fields store the entity's address, instance fields store the
	 * member offset — the two share one union-like compound slot pair */
	ir_initializer_t *offset_addr_init = create_initializer_compound(2);
	if (linked_field->access_flags & ACCESS_FLAG_STATIC) {
		set_compound_init_null(offset_addr_init, 0);
		set_compound_init_entref(offset_addr_init, 1, ent);
	} else {
		ir_node *offset = new_r_Offset(ccode, mode_int, ent);
		set_compound_init_node(offset_addr_init, 0, offset);
		set_compound_init_null(offset_addr_init, 1);
	}

	/* assemble the field descriptor; slot order must match the runtime's
	 * field record layout */
	unsigned NUM_FIELDS = 5;
	ir_initializer_t *init = create_initializer_compound(NUM_FIELDS);
	size_t f = 0;
	set_compound_init_entref(init, f++, name_ent);
	set_compound_init_entref(init, f++, rtti_entity);
	set_compound_init_num(init, f++, mode_ushort, linked_field->access_flags);
	set_compound_init_node(init, f++, bsize);
	set_initializer_compound_value(init, f++, offset_addr_init);
	assert(f == NUM_FIELDS);
	return init;
}
/**
 * Perform some fixups for variadic functions.
 * To make the rest of the frontend code easier to understand we add
 * "dummy" parameters until the number of parameters transmitted in registers.
 * (because otherwise the backend wouldn't store the value of the register
 * parameters into memory for the VLA magic)
 *
 * @return true if the entity's method type was exchanged (so the caller
 *         must redo its calling-convention decision), false otherwise
 */
static bool sparc_variadic_fixups(ir_graph *const irg,
                                  calling_convention_t *const cconv)
{
	ir_entity *entity = get_irg_entity(irg);
	ir_type   *mtp    = get_entity_type(entity);
	if (!is_method_variadic(mtp))
		return false;

	/* nothing to do if all parameter registers are already used */
	if (cconv->n_param_regs >= SPARC_N_PARAM_REGS)
		return false;

	size_t   const n_params     = get_method_n_params(mtp);
	size_t   const n_ress       = get_method_n_ress(mtp);
	/* pad with dummy parameters up to the full register count */
	size_t   const new_n_params
		= n_params + (SPARC_N_PARAM_REGS - cconv->n_param_regs);
	unsigned const cc_mask      = get_method_calling_convention(mtp);
	mtp_additional_properties const props
		= get_method_additional_properties(mtp);
	ir_type *const new_mtp = new_type_method(new_n_params, n_ress, true,
	                                         cc_mask, props);

	type_dbg_info *const dbgi = get_type_dbg_info(mtp);
	set_type_dbg_info(new_mtp, dbgi);

	/* copy the original result and parameter types over */
	for (size_t i = 0; i < n_ress; ++i) {
		ir_type *type = get_method_res_type(mtp, i);
		set_method_res_type(new_mtp, i, type);
	}
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *type = get_method_param_type(mtp, i);
		set_method_param_type(new_mtp, i, type);
	}

	/* append gp-register-sized dummy parameters with frame entities so the
	 * register values get spilled to memory */
	ir_type *const frame_type  = get_irg_frame_type(irg);
	ir_mode *const gp_reg_mode = sparc_reg_classes[CLASS_sparc_gp].mode;
	ir_type *const gp_reg_type = get_type_for_mode(gp_reg_mode);
	for (size_t i = n_params; i < new_n_params; ++i) {
		set_method_param_type(new_mtp, i, gp_reg_type);
		new_parameter_entity(frame_type, i, gp_reg_type);
	}
	set_entity_type(entity, new_mtp);
	return true;
}
/* Default object allocation: call calloc(1, sizeof(type)) so the new
 * object's memory is zero-initialized; returns the object pointer. */
ir_node *dmemory_default_alloc_object(ir_type *type, ir_graph *irg,
                                      ir_node *block, ir_node **mem)
{
	symconst_symbol type_sym;
	type_sym.type_p = type;
	ir_node *size = new_r_SymConst(irg, mode_Iu, type_sym,
	                               symconst_type_size);

	symconst_symbol calloc_sym;
	calloc_sym.entity_p = calloc_entity;
	ir_node *callee = new_r_SymConst(irg, mode_P, calloc_sym,
	                                 symconst_addr_ent);

	ir_node *one  = new_r_Const_long(irg, mode_Iu, 1);
	ir_node *in[2] = { one, size };
	ir_node *call = new_r_Call(block, *mem, callee, 2, in,
	                           get_entity_type(calloc_entity));
	ir_node *ress = new_r_Proj(call, mode_T, pn_Call_T_result);
	*mem = new_r_Proj(call, mode_M, pn_Call_M);
	return new_r_Proj(ress, mode_P, 0);
}
/**
 * Return the access kind (read/write/both) of the method parameter at the
 * given position, lazily running the argument analysis if needed.
 *
 * @param ent  the method entity
 * @param pos  parameter position
 * @return     the access kind, or ptr_access_all for positions beyond the
 *             analyzed range (e.g. variadic arguments)
 */
ptr_access_kind get_method_param_access(ir_entity *ent, size_t pos)
{
#ifndef NDEBUG
	ir_type *mtp = get_entity_type(ent);
	bool is_variadic = get_method_variadicity(mtp) == variadicity_variadic;
	assert(is_variadic || pos < get_method_n_params(mtp));
#endif
	/* deduplicated: analyze lazily, then perform a single bounded lookup
	 * (the original repeated the lookup/fallback in both branches) */
	if (ent->attr.mtd_attr.param_access == NULL)
		analyze_ent_args(ent);

	if (pos < ARR_LEN(ent->attr.mtd_attr.param_access))
		return ent->attr.mtd_attr.param_access[pos];
	/* conservative answer for out-of-range positions */
	return ptr_access_all;
}
/*
 * Transform Sel[method] to SymC[method] if possible.
 * (see opt_polymorphy in libfirm)
 */
static ir_node *transform_node_Sel2(ir_node *node)
{
	ir_node   *new_node;
	ir_entity *ent = get_Sel_entity(node);

	/* no transformation while the graph is still being built */
	if (get_irp_phase_state() == phase_building)
		return node;

	if (!get_opt_dyn_meth_dispatch())
		return node;

	/* only method selections are candidates for devirtualization */
	if (!is_Method_type(get_entity_type(ent)))
		return node;

	ddispatch_binding bind = oo_get_entity_binding(ent);
	assert (bind != bind_unknown);
	/* statically bound methods need no dynamic dispatch anyway */
	if (bind == bind_static)
		return node;

	/* If we know the dynamic type, we can replace the Sel by a constant. */
	ir_node *ptr    = get_Sel_ptr(node);   /* The address we select from. */
	ir_type *dyn_tp = get_irn_typeinfo_type(ptr);
	if (dyn_tp != initial_type) {
		ir_entity *called_ent;
		/* We know which method will be called, no dispatch necessary. */
		called_ent = resolve_ent_polymorphy(dyn_tp, ent);
		assert (! oo_get_method_is_abstract(called_ent));
		assert (! oo_get_class_is_interface(get_entity_owner(called_ent)));
		/* copy the method's address constant into this block */
		new_node = copy_const_value(get_irn_dbg_info(node),
		                            get_atomic_ent_value(called_ent),
		                            get_nodes_block(node));
		return new_node;
	}
	return node;
}
/* Determine the class type a Sel/SymConst address refers to, looking
 * through a method's single result type and through pointers; returns
 * NULL when no class type is found. */
static ir_type *get_Sel_or_SymConst_type(ir_node *sos)
{
	assert(is_Sel(sos) || is_SymConst_addr_ent(sos));
	ir_entity *entity = get_irn_entity_attr(sos);
	ir_type   *type   = get_entity_type(entity);
	if (is_Method_type(type)) {
		/* follow the (single) result type of the method */
		size_t n_ress = get_method_n_ress(type);
		if (n_ress == 0)
			return NULL;
		assert(n_ress == 1);
		type = get_method_res_type(type, 0);
	}
	if (is_Pointer_type(type))
		type = get_pointer_points_to_type(type);
	return is_Class_type(type) ? type : NULL;
}
/* Debug-only sanity check: the initializer kind must be compatible with
 * the entity's type. Compiles to nothing in NDEBUG builds. */
static void check_entity_initializer(ir_entity *entity)
{
#ifndef NDEBUG
	ir_initializer_t *const initializer = entity->initializer;
	ir_type          *const tp          = get_entity_type(entity);
	switch (initializer->kind) {
	case IR_INITIALIZER_COMPOUND:
		assert(is_compound_type(tp) || is_Array_type(tp));
		break;
	case IR_INITIALIZER_CONST:
		/* methods are initialized by a SymConst */
		assert(is_atomic_type(tp) || is_Method_type(tp));
		break;
	case IR_INITIALIZER_TARVAL:
		assert(is_atomic_type(tp));
		break;
	case IR_INITIALIZER_NULL:
		break;
	}
#else
	(void)entity;
#endif
}
// Constructor: forwards cursor and parent to the cpp_entity base —
// get_entity_type() presumably is the class's static kind getter (TODO
// confirm; it is called without an object) — and stores the access level.
cpp_access_specifier(cpp_cursor cur, const cpp_entity& parent,
                     cpp_access_specifier_t a)
: cpp_entity(get_entity_type(), cur, parent), access_(a) {}
static void lower_divmod(ir_node *node, ir_node *left, ir_node *right, ir_node *mem, ir_mode *mode, int res_offset) { dbg_info *dbgi = get_irn_dbg_info(node); ir_node *block = get_nodes_block(node); ir_node *left_low = get_lowered_low(left); ir_node *left_high = get_lowered_high(left); ir_node *right_low = get_lowered_low(right); ir_node *right_high = get_lowered_high(right); ir_mode *node_mode = get_irn_mode(left); ir_entity *entity = mode_is_signed(node_mode) ? ldivmod : uldivmod; ir_type *mtp = get_entity_type(entity); ir_graph *irg = get_irn_irg(node); ir_node *addr = new_r_Address(irg, entity); ir_node *in[4]; if (arm_cg_config.big_endian) { in[0] = left_high; in[1] = left_low; in[2] = right_high; in[3] = right_low; } else { in[0] = left_low; in[1] = left_high; in[2] = right_low; in[3] = right_high; } ir_node *call = new_rd_Call(dbgi, block, mem, addr, ARRAY_SIZE(in), in, mtp); ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result); set_irn_pinned(call, get_irn_pinned(node)); foreach_out_edge_safe(node, edge) { ir_node *proj = get_edge_src_irn(edge); if (!is_Proj(proj)) continue; switch ((pn_Div)get_Proj_num(proj)) { case pn_Div_M: /* reroute to the call */ set_Proj_pred(proj, call); set_Proj_num(proj, pn_Call_M); break; case pn_Div_X_regular: set_Proj_pred(proj, call); set_Proj_num(proj, pn_Call_X_regular); break; case pn_Div_X_except: set_Proj_pred(proj, call); set_Proj_num(proj, pn_Call_X_except); break; case pn_Div_res: { ir_mode *low_mode = get_irn_mode(left_low); if (arm_cg_config.big_endian) { ir_node *res_low = new_r_Proj(resproj, low_mode, res_offset+1); ir_node *res_high = new_r_Proj(resproj, mode, res_offset); ir_set_dw_lowered(proj, res_low, res_high); } else { ir_node *res_low = new_r_Proj(resproj, low_mode, res_offset); ir_node *res_high = new_r_Proj(resproj, mode, res_offset+1); ir_set_dw_lowered(proj, res_low, res_high); } break; } } /* mark this proj: we have handled it already, otherwise we might fall * into out new nodes. 
*/ mark_irn_visited(proj); }