ir_node *get_atomic_ent_value(const ir_entity *entity) { ir_initializer_t *initializer = get_entity_initializer(entity); assert(is_atomic_entity(entity)); if (initializer == NULL) { ir_type *type = get_entity_type(entity); return new_r_Unknown(get_const_code_irg(), get_type_mode(type)); } switch (get_initializer_kind(initializer)) { case IR_INITIALIZER_NULL: { ir_type *type = get_entity_type(entity); ir_mode *mode = get_type_mode(type); return new_r_Const(get_const_code_irg(), get_mode_null(mode)); } case IR_INITIALIZER_TARVAL: { ir_tarval *tv = get_initializer_tarval_value(initializer); return new_r_Const(get_const_code_irg(), tv); } case IR_INITIALIZER_CONST: return get_initializer_const_value(initializer); case IR_INITIALIZER_COMPOUND: panic("compound initializer in atomic entity not allowed (%+F)", entity); } panic("invalid initializer kind (%+F)", entity); }
/**
 * Adjust the size of a node representing a stack alloc to a certain
 * stack_alignment.
 *
 * @param dbgi  debug info for the newly created nodes
 * @param size  the node containing the non-aligned size
 * @param block the block where new nodes are allocated on
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(dbg_info *dbgi, ir_node *size, ir_node *block)
{
	/* Round up to the alignment, e.g. po2_stack_alignment 4
	 * (align to 16 bytes): size = (size + 15) & 0xff...f0 */
	ir_mode   *const mode   = get_irn_mode(size);
	ir_graph  *const irg    = get_irn_irg(block);
	ir_tarval *const allone = get_mode_all_one(mode);
	/* mask with the low po2_stack_alignment bits cleared */
	ir_tarval *const mask
		= tarval_shl_unsigned(tarval_shr_unsigned(allone, po2_stack_alignment),
		                      po2_stack_alignment);
	/* add alignment-1 before masking so we round up, not down */
	ir_node *const roundup = new_r_Const(irg, tarval_not(mask));
	ir_node *const summed  = new_rd_Add(dbgi, block, size, roundup);
	ir_node *const maskc   = new_r_Const(irg, mask);
	return new_rd_And(dbgi, block, summed, maskc);
}
/**
 * Adjust the size of a node representing a stack alloc to a certain
 * stack_alignment.
 *
 * @param dbgi  debug info for the newly created nodes
 * @param size  the node containing the non-aligned size
 * @param block the block where new nodes are allocated on
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(dbg_info *dbgi, ir_node *size, ir_node *block)
{
	/* nothing to align */
	if (stack_alignment <= 1)
		return size;
	/* constant sizes are left alone unless constant lowering is requested */
	if (is_Const(size) && !lower_constant_sizes)
		return size;

	/* size = (size + (alignment-1)) & -alignment */
	ir_mode  *const mode  = get_irn_mode(size);
	ir_graph *const irg   = get_Block_irg(block);
	ir_node  *const add_c = new_r_Const(irg, new_tarval_from_long(stack_alignment - 1, mode));
	ir_node  *const sum   = new_rd_Add(dbgi, block, size, add_c, mode);
	ir_node  *const and_c = new_r_Const(irg, new_tarval_from_long(-(long)stack_alignment, mode));
	return new_rd_And(dbgi, block, sum, and_c, mode);
}
/**
 * Build a call to the gcj runtime helper that resolves an array class.
 *
 * @param block            block to place the call in
 * @param mem              in/out memory state, updated to the call's memory
 * @param array_class_ref  reference to the element class
 * @return the class reference returned by the runtime call
 */
static ir_node *gcji_get_arrayclass(ir_node *block, ir_node **mem, ir_node *array_class_ref)
{
	ir_graph *irg       = get_irn_irg(block);
	ir_node  *addr      = new_r_Address(irg, gcj_get_array_class_entity);
	ir_node  *null      = new_r_Const(irg, get_mode_null(mode_reference));
	ir_node  *args[]    = { array_class_ref, null };
	ir_type  *call_type = get_entity_type(gcj_get_array_class_entity);
	ir_node  *call      = new_r_Call(block, *mem, addr, ARRAY_SIZE(args), args,
	                                 call_type);
	/* Use the explicit-graph new_r_Proj constructors like the other gcji_*
	 * helpers instead of new_Proj, which relies on the implicit
	 * current_ir_graph being set correctly. */
	ir_node  *new_mem   = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node  *ress      = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node  *res       = new_r_Proj(ress, mode_reference, 0);
	*mem = new_mem;
	return res;
}
/**
 * Build a runtime instanceof test: call the gcj_instanceof helper with the
 * object pointer and the runtime class info, then compare the returned int
 * against zero.
 *
 * @param objptr     the object reference to test
 * @param classtype  the class type to test against
 * @param irg        graph the nodes belong to
 * @param block      block to place the new nodes in
 * @param mem        in/out memory state, updated to the call's memory
 * @return a Cmp node that is "true" iff the call returned non-zero
 */
static ir_node *gcji_instanceof(ir_node *objptr, ir_type *classtype, ir_graph *irg, ir_node *block, ir_node **mem)
{
	ir_node *jclass  = gcji_get_runtime_classinfo_(block, mem, classtype);
	ir_node *callee  = new_r_Address(irg, gcj_instanceof_entity);
	ir_node *in[]    = { objptr, jclass };
	ir_type *type    = get_entity_type(gcj_instanceof_entity);
	ir_node *call    = new_r_Call(block, *mem, callee, ARRAY_SIZE(in), in, type);
	*mem             = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node *results = new_r_Proj(call, mode_T, pn_Call_T_result);
	ir_node *result  = new_r_Proj(results, mode_int, 0);
	/* turn the int result into a firm boolean: result != 0 */
	ir_node *zero    = new_r_Const(irg, get_mode_null(mode_int));
	return new_r_Cmp(block, result, zero, ir_relation_less_greater);
}
/**
 * lower 64bit conversions
 *
 * Three cases are handled:
 *  - float -> 64bit integer: lowered to the ia32 pseudo node l_FloattoLL.
 *    The unsigned variant needs compensation code because the hardware
 *    conversion is signed (see below).
 *  - 64bit integer -> float: lowered to the ia32 pseudo node l_LLtoFloat.
 *  - everything else: delegated to the default double-word lowering.
 */
static void ia32_lower_conv64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbg       = get_irn_dbg_info(node);
	ir_node  *op        = get_Conv_op(node);
	ir_mode  *mode_from = get_irn_mode(op);
	ir_mode  *mode_to   = get_irn_mode(node);

	if (mode_is_float(mode_from) && get_mode_size_bits(mode_to) == 64
	    && get_mode_arithmetic(mode_to) == irma_twos_complement) {
		/* We have a Conv float -> long long here */
		ir_node *float_to_ll;
		ir_node *l_res;
		ir_node *h_res;
		if (mode_is_signed(mode)) {
			/* convert from float to signed 64bit */
			ir_node *block = get_nodes_block(node);
			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, block, op);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
			                   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
			                   pn_ia32_l_FloattoLL_res_high);
		} else {
			/* Convert from float to unsigned 64bit.
			 * Values >= 2**63 are first reduced by 2**63, converted,
			 * and the high word of the result corrected by
			 * 0x80000000 via a Phi selected on the comparison. */
			ir_graph  *irg = get_irn_irg(node);
			/* 2**63 as an extended-precision float constant */
			ir_tarval *flt_tv
				= new_tarval_from_str("9223372036854775808", 19, x86_mode_E);
			ir_node   *flt_corr  = new_r_Const(irg, flt_tv);
			/* split the block: node and everything after it moves to
			 * lower_blk */
			ir_node   *lower_blk = part_block_dw(node);
			ir_node   *upper_blk = get_nodes_block(node);
			set_dw_control_flow_changed();

			/* compare the (extended) operand against 2**63 */
			ir_node *opc  = new_rd_Conv(dbg, upper_blk, op, x86_mode_E);
			ir_node *cmp  = new_rd_Cmp(dbg, upper_blk, opc, flt_corr,
			                           ir_relation_less);
			ir_node *cond = new_rd_Cond(dbg, upper_blk, cmp);
			ir_node *in[] = {
				new_r_Proj(cond, mode_X, pn_Cond_true),
				new_r_Proj(cond, mode_X, pn_Cond_false)
			};
			/* route the false projection through an extra block so
			 * lower_blk gets two distinct predecessors */
			ir_node *blk = new_r_Block(irg, 1, &in[1]);
			in[1] = new_r_Jmp(blk);
			set_irn_in(lower_blk, 2, in);

			/* create the two Phis: the integer correction for the high
			 * word and the (possibly reduced) float operand */
			ir_node *phi_in[] = {
				new_r_Const_null(irg, mode),
				new_r_Const_long(irg, mode, 0x80000000)
			};
			ir_node *int_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(phi_in), phi_in, mode);
			ir_node *fphi_in[] = {
				opc,
				new_rd_Sub(dbg, upper_blk, opc, flt_corr, x86_mode_E)
			};
			ir_node *flt_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(fphi_in), fphi_in, x86_mode_E);
			/* fix Phi links for next part_block() */
			if (is_Phi(int_phi))
				add_Block_phi(lower_blk, int_phi);
			if (is_Phi(flt_phi))
				add_Block_phi(lower_blk, flt_phi);

			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, lower_blk, flt_phi);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
			                   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
			                   pn_ia32_l_FloattoLL_res_high);
			/* re-apply the correction to the high word */
			h_res = new_rd_Add(dbg, lower_blk, h_res, int_phi, mode);

			/* move the call and its Proj's to the lower block */
			set_nodes_block(node, lower_blk);

			for (ir_node *proj = (ir_node*)get_irn_link(node); proj != NULL;
			     proj = (ir_node*)get_irn_link(proj)) {
				set_nodes_block(proj, lower_blk);
			}
		}
		ir_set_dw_lowered(node, l_res, h_res);
	} else if (get_mode_size_bits(mode_from) == 64
	           && get_mode_arithmetic(mode_from) == irma_twos_complement
	           && mode_is_float(mode_to)) {
		/* We have a Conv long long -> float here */
		ir_node *op_low      = get_lowered_low(op);
		ir_node *op_high     = get_lowered_high(op);
		ir_node *block       = get_nodes_block(node);
		ir_node *ll_to_float
			= new_bd_ia32_l_LLtoFloat(dbg, block, op_high, op_low, mode_to);

		exchange(node, ll_to_float);
	} else {
		ir_default_lower_dw_Conv(node, mode);
	}
}
/**
 * Lower a Sel node. Do not touch Sels accessing entities on the frame type.
 *
 * Replaces the Sel by explicit address arithmetic:
 *  - array accesses become ptr + index * element_size (per dimension),
 *  - method Sels on class types additionally load the function pointer
 *    through the computed address (dispatch table),
 *  - plain member accesses become ptr + entity_offset.
 */
static void lower_sel(ir_node *sel)
{
	ir_graph  *irg   = get_irn_irg(sel);
	ir_entity *ent   = get_Sel_entity(sel);
	ir_type   *owner = get_entity_owner(ent);
	dbg_info  *dbg   = get_irn_dbg_info(sel);
	ir_mode   *mode  = get_irn_mode(sel);
	ir_node   *bl    = get_nodes_block(sel);
	ir_node   *newn;

	/* we can only replace Sels when the layout of the owner type is decided. */
	if (get_type_state(owner) != layout_fixed)
		return;

	if (0 < get_Sel_n_indexs(sel)) {
		/* an Array access */
		ir_type *basetyp = get_entity_type(ent);
		ir_mode *basemode;
		ir_node *index;
		/* non-primitive elements are addressed through a data pointer */
		if (is_Primitive_type(basetyp))
			basemode = get_type_mode(basetyp);
		else
			basemode = mode_P_data;

		assert(basemode && "no mode for lowering Sel");
		assert((get_mode_size_bits(basemode) % 8 == 0) && "can not deal with unorthodox modes");
		index = get_Sel_index(sel, 0);

		if (is_Array_type(owner)) {
			ir_type   *arr_ty   = owner;
			size_t     dims     = get_array_n_dimensions(arr_ty);
			size_t    *map      = ALLOCAN(size_t, dims);
			ir_mode   *mode_Int = get_reference_mode_signed_eq(mode);
			ir_tarval *tv;
			ir_node   *last_size;
			size_t     i;

			assert(dims == (size_t)get_Sel_n_indexs(sel)
			       && "array dimension must match number of indices of Sel node");

			/* map[order] = dimension stored at that memory order */
			for (i = 0; i < dims; i++) {
				size_t order = get_array_order(arr_ty, i);

				assert(order < dims &&
				       "order of a dimension must be smaller than the arrays dim");
				map[order] = i;
			}
			newn = get_Sel_ptr(sel);

			/* Size of the array element */
			tv = new_tarval_from_long(get_type_size_bytes(basetyp), mode_Int);
			last_size = new_rd_Const(dbg, irg, tv);

			/*
			 * We compute the offset part of dimension d_i recursively
			 * with the offset part of dimension d_{i-1}
			 *
			 *     off_0 = sizeof(array_element_type);
			 *     off_i = (u_i - l_i) * off_{i-1}  ; i >= 1
			 *
			 * whereas u_i is the upper bound of the current dimension
			 * and l_i the lower bound of the current dimension.
			 */
			for (i = dims; i > 0;) {
				size_t   dim = map[--i];
				ir_node *lb, *ub, *elms, *n, *ind;

				elms = NULL;
				lb = get_array_lower_bound(arr_ty, dim);
				ub = get_array_upper_bound(arr_ty, dim);

				/* known bounds are copied into this block and converted
				 * to the index mode; Unknown bounds become NULL */
				if (! is_Unknown(lb))
					lb = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), lb, bl), mode_Int);
				else
					lb = NULL;

				if (! is_Unknown(ub))
					ub = new_rd_Conv(dbg, bl, copy_const_value(get_irn_dbg_info(sel), ub, bl), mode_Int);
				else
					ub = NULL;

				/*
				 * If the array has more than one dimension, lower and upper
				 * bounds have to be set in the non-last dimension.
				 */
				if (i > 0) {
					assert(lb != NULL && "lower bound has to be set in multi-dim array");
					assert(ub != NULL && "upper bound has to be set in multi-dim array");

					/* Elements in one Dimension */
					elms = new_rd_Sub(dbg, bl, ub, lb, mode_Int);
				}

				ind = new_rd_Conv(dbg, bl, get_Sel_index(sel, dim), mode_Int);

				/*
				 * Normalize the index: if a lower bound is set, subtract
				 * it so we can treat the lower bound as 0 afterwards.
				 */
				if (lb != NULL)
					ind = new_rd_Sub(dbg, bl, ind, lb, mode_Int);

				n = new_rd_Mul(dbg, bl, ind, last_size, mode_Int);

				/*
				 * see comment above.
				 */
				if (i > 0)
					last_size = new_rd_Mul(dbg, bl, last_size, elms, mode_Int);

				newn = new_rd_Add(dbg, bl, newn, n, mode);
			}
		} else {
			/* no array type: ptr + index * element_size */
			ir_mode   *idx_mode = get_irn_mode(index);
			ir_tarval *tv       = new_tarval_from_long(get_mode_size_bytes(basemode), idx_mode);

			newn = new_rd_Add(dbg, bl, get_Sel_ptr(sel),
			                  new_rd_Mul(dbg, bl, index,
			                             new_r_Const(irg, tv),
			                             idx_mode),
			                  mode);
		}
	} else if (is_Method_type(get_entity_type(ent)) && is_Class_type(owner)) {
		/* We need an additional load when accessing methods from a dispatch
		 * table.
		 * Matze TODO: Is this really still used? At least liboo does its own
		 * lowering of Method-Sels...
		 */
		ir_mode   *ent_mode = get_type_mode(get_entity_type(ent));
		int        offset   = get_entity_offset(ent);
		ir_mode   *mode_Int = get_reference_mode_signed_eq(mode);
		ir_tarval *tv       = new_tarval_from_long(offset, mode_Int);
		ir_node   *cnst     = new_rd_Const(dbg, irg, tv);
		ir_node   *add      = new_rd_Add(dbg, bl, get_Sel_ptr(sel), cnst, mode);
		ir_node   *mem      = get_Sel_mem(sel);
		newn = new_rd_Load(dbg, bl, mem, add, ent_mode, cons_none);
		newn = new_r_Proj(newn, ent_mode, pn_Load_res);
	} else {
		int offset = get_entity_offset(ent);

		/* replace Sel by add(obj, const(ent.offset)) */
		newn = get_Sel_ptr(sel);
		if (offset != 0) {
			ir_mode   *mode_UInt = get_reference_mode_unsigned_eq(mode);
			ir_tarval *tv        = new_tarval_from_long(offset, mode_UInt);
			ir_node   *cnst      = new_r_Const(irg, tv);
			newn = new_rd_Add(dbg, bl, newn, cnst, mode);
		}
	}
	/* run the hooks */
	hook_lower(sel);
	exchange(sel, newn);
}
/**
 * Lower all possible SymConst nodes.
 *
 * Type sizes/alignments, entity offsets and enumeration constants are
 * folded into Const nodes; address SymConsts are left untouched.
 */
static void lower_symconst(ir_node *symc)
{
	ir_graph *irg = get_irn_irg(symc);
	ir_node  *newn;

	switch (get_SymConst_kind(symc)) {
	case symconst_type_size: {
		/* rewrite the SymConst node by a Const node */
		ir_type *tp = get_SymConst_type(symc);
		assert(get_type_state(tp) == layout_fixed);
		newn = new_r_Const_long(irg, get_irn_mode(symc),
		                        get_type_size_bytes(tp));
		break;
	}
	case symconst_type_align: {
		/* rewrite the SymConst node by a Const node */
		ir_type *tp = get_SymConst_type(symc);
		assert(get_type_state(tp) == layout_fixed);
		newn = new_r_Const_long(irg, get_irn_mode(symc),
		                        get_type_alignment_bytes(tp));
		break;
	}
	case symconst_addr_ent:
		/* leave address SymConsts alone */
		return;
	case symconst_ofs_ent: {
		/* rewrite the SymConst node by a Const node */
		ir_entity *ent = get_SymConst_entity(symc);
		assert(get_type_state(get_entity_type(ent)) == layout_fixed);
		newn = new_r_Const_long(irg, get_irn_mode(symc),
		                        get_entity_offset(ent));
		break;
	}
	case symconst_enum_const: {
		/* rewrite the SymConst node by a Const node */
		ir_enum_const *ec = get_SymConst_enum(symc);
		assert(get_type_state(get_enumeration_owner(ec)) == layout_fixed);
		newn = new_r_Const(irg, get_enumeration_value(ec));
		break;
	}
	default:
		assert(!"unknown SymConst kind");
		return;
	}

	assert(newn);
	/* run the hooks */
	hook_lower(symc);
	exchange(symc, newn);
}