int smaller_mode(const ir_mode *sm, const ir_mode *lm) { assert(sm != NULL); assert(lm != NULL); if (sm == lm) return true; switch (get_mode_sort(sm)) { case irms_int_number: switch (get_mode_sort(lm)) { case irms_int_number: if (get_mode_arithmetic(sm) != get_mode_arithmetic(lm)) return false; /* only two complement implemented */ assert(get_mode_arithmetic(sm) == irma_twos_complement); /* integers are convertable if * - both have the same sign and lm is the larger one * - lm is signed and is at least one bit larger (the sign) */ unsigned sm_bits = get_mode_size_bits(sm); unsigned lm_bits = get_mode_size_bits(lm); if (mode_is_signed(sm)) { if (!mode_is_signed(lm)) return false; } else { if (mode_is_signed(lm)) return sm_bits < lm_bits; } return sm_bits <= lm_bits; case irms_auxiliary: case irms_data: case irms_internal_boolean: case irms_reference: case irms_float_number: /* int to float works if the float is large enough */ return false; } panic("invalid mode_sort"); case irms_float_number: return get_mode_arithmetic(sm) == get_mode_arithmetic(lm) && mode_is_float(lm) && get_mode_size_bits(lm) >= get_mode_size_bits(sm); case irms_auxiliary: case irms_data: case irms_internal_boolean: case irms_reference: /* do exist machines out there with different pointer lengths ?*/ return false; } panic("invalid mode_sort"); }
/**
 * Transforms a Conv into the appropriate soft float function.
 *
 * @return true if the node was lowered (or proved redundant and removed),
 *         false if it is not a float conversion and was left untouched.
 */
static bool lower_Conv(ir_node *const n)
{
	dbg_info *const dbgi    = get_irn_dbg_info(n);
	ir_node  *const block   = get_nodes_block(n);
	ir_mode  *const mode    = get_irn_mode(n);
	ir_node        *op      = get_Conv_op(n);
	ir_mode        *op_mode = get_irn_mode(op);

	char const *name;
	if (!mode_is_float(mode)) {
		/* Only conversions involving floats concern the softfloat pass. */
		if (!mode_is_float(op_mode))
			return false;
		/* float -> int */
		name = mode_is_signed(mode) ? "fix" : "fixuns";
	} else if (!mode_is_float(op_mode)) {
		/* int -> float: widen sub-word integer operands first (presumably
		 * the runtime routines take at least word-sized arguments —
		 * NOTE(review): confirm against the softfloat call table). */
		ir_mode *min_mode;
		if (mode_is_signed(op_mode)) {
			name     = "float";
			min_mode = mode_Is;
		} else {
			name     = "floatun";
			min_mode = mode_Iu;
		}
		if (get_mode_size_bits(op_mode) < get_mode_size_bits(min_mode)) {
			op_mode = min_mode;
			op      = new_rd_Conv(dbgi, block, op, op_mode);
		}
	} else {
		/* float -> float: a Conv to the same mode is a no-op. */
		if (op_mode == mode) {
			exchange(n, op);
			return true;
		}
		name = get_mode_size_bits(op_mode) > get_mode_size_bits(mode)
		     ? "trunc"
		     : "extend";
	}

	ir_node *const in[]   = { op };
	ir_node       *result = make_softfloat_call(n, name, ARRAY_SIZE(in), in);

	/* The call may produce a different mode; Conv the result back if so. */
	if (get_irn_mode(result) != mode)
		result = new_rd_Conv(dbgi, block, result, mode);

	exchange(n, result);
	return true;
}
/**
 * Determine whether the unsigned subtraction left - right can produce a
 * borrow, based on the value-range (bitinfo) attached to both operands.
 */
static carry_result lower_sub_borrow(ir_node *left, ir_node *right, ir_mode *mode)
{
	assert(!mode_is_signed(mode));

	bitinfo *const bi_left = get_bitinfo(left);
	if (bi_left == NULL) {
		/* No range information: assume the worst. */
		return can_carry;
	}
	bitinfo *const bi_right = get_bitinfo(right);
	/* If we have bitinfo for one node, we should also have it for the
	 * other. */
	assert(bi_right);

	ir_tarval *const lmin = tarval_convert_to(bitinfo_min(bi_left), mode);
	ir_tarval *const lmax = tarval_convert_to(bitinfo_max(bi_left), mode);
	ir_tarval *const rmin = tarval_convert_to(bitinfo_min(bi_right), mode);
	ir_tarval *const rmax = tarval_convert_to(bitinfo_max(bi_right), mode);

	/* Evaluate the extreme differences with wrapping disabled, so that
	 * tarval_sub reports underflow as tarval_bad. */
	int const old_wrap_on_overflow = tarval_get_wrap_on_overflow();
	tarval_set_wrap_on_overflow(false);

	carry_result result = no_carry;
	if (tarval_sub(lmin, rmax) == tarval_bad) {
		/* The smallest possible difference underflows: a borrow can occur. */
		result = can_carry;
		if (tarval_sub(lmax, rmin) == tarval_bad) {
			/* Even the largest difference underflows: a borrow always
			 * occurs. */
			result = must_carry;
		}
	}

	tarval_set_wrap_on_overflow(old_wrap_on_overflow);
	return result;
}
int values_in_mode(const ir_mode *sm, const ir_mode *lm) { assert(sm != NULL); assert(lm != NULL); if (sm == lm) return true; if (sm == mode_b) return mode_is_int(lm) || mode_is_float(lm); ir_mode_arithmetic larith = get_mode_arithmetic(lm); ir_mode_arithmetic sarith = get_mode_arithmetic(sm); switch (larith) { case irma_x86_extended_float: case irma_ieee754: if (sarith == irma_ieee754 || sarith == irma_x86_extended_float) { return get_mode_size_bits(sm) <= get_mode_size_bits(lm); } else if (sarith == irma_twos_complement) { unsigned int_mantissa = get_mode_size_bits(sm) - (mode_is_signed(sm) ? 1 : 0); unsigned float_mantissa = get_mode_mantissa_size(lm) + 1; return int_mantissa <= float_mantissa; } break; case irma_twos_complement: if (sarith == irma_twos_complement) return get_mode_size_bits(sm) <= get_mode_size_bits(lm); break; case irma_none: break; } return false; }
/**
 * Return the compiler-library entity implementing a 64bit Div/Mod with the
 * given method type and input mode.
 */
static ir_entity *ia32_create_intrinsic_fkt(ir_type *method, const ir_op *op,
                                            const ir_mode *imode,
                                            const ir_mode *omode,
                                            void *context)
{
	(void)omode;
	(void)context;

	bool const  is_signed = mode_is_signed(imode);
	const char *name;
	if (op == op_Div) {
		name = is_signed ? "__divdi3" : "__udivdi3";
	} else if (op == op_Mod) {
		name = is_signed ? "__moddi3" : "__umoddi3";
	} else {
		panic("ia32: Unexpected lowering of 64bit op %s", get_op_name(op));
	}
	return create_compilerlib_entity(new_id_from_str(name), method);
}
/**
 * Walker callback: map integer Div/Mod nodes to the corresponding runtime
 * library call entities.
 */
static void handle_intrinsic(ir_node *node, void *data)
{
	(void)data;

	if (is_Div(node)) {
		ir_mode *const mode = get_Div_resmode(node);
		/* only integer division is mapped; float Div stays as-is */
		if (get_mode_arithmetic(mode) == irma_twos_complement) {
			ir_entity *const entity = mode_is_signed(mode) ? divsi3 : udivsi3;
			be_map_exc_node_to_runtime_call(node, mode, entity, pn_Div_M,
			                                pn_Div_X_regular, pn_Div_X_except,
			                                pn_Div_res);
		}
	} else if (is_Mod(node)) {
		ir_mode *const mode = get_Mod_resmode(node);
		/* Mod on non-integer modes is not expected here. */
		assert(get_mode_arithmetic(mode) == irma_twos_complement);
		ir_entity *const entity = mode_is_signed(mode) ? modsi3 : umodsi3;
		be_map_exc_node_to_runtime_call(node, mode, entity, pn_Mod_M,
		                                pn_Mod_X_regular, pn_Mod_X_except,
		                                pn_Mod_res);
	}
}
/**
 * Check that every enumerator of @p enume is representable in a bitfield of
 * @p bitfield_size bits whose base type is @p base_type.
 */
bool enum_bitfield_big_enough(enum_t *enume, type_t *base_type,
                              unsigned bitfield_size)
{
	ir_mode   *const mode         = get_ir_mode_storage(base_type);
	ir_tarval *const max          = get_mode_max(mode);
	ir_tarval *const min          = get_mode_min(mode);
	bool       const is_signed    = is_type_signed(base_type);
	unsigned   const mode_size    = get_mode_size_bits(mode);
	unsigned   const shift_amount = mode_size - bitfield_size + is_signed;

	/* Derive the bitfield's representable range by shifting the mode's
	 * extreme values down to bitfield_size bits.
	 * Corner case: a signed mode with just the sign bit results in
	 * shift_amount being as big as mode_size, triggering "modulo shift",
	 * which is not what we want here. */
	ir_tarval *adjusted_max;
	ir_tarval *adjusted_min;
	if (shift_amount < mode_size) {
		adjusted_max = tarval_shr_unsigned(max, shift_amount);
		adjusted_min = tarval_shrs_unsigned(min, shift_amount);
	} else {
		assert(bitfield_size == 1 && mode_is_signed(mode));
		adjusted_max = get_mode_null(mode);
		adjusted_min = get_mode_all_one(mode);
	}

	for (entity_t *entry = enume->first_value;
	     entry != NULL && entry->kind == ENTITY_ENUM_VALUE;
	     entry = entry->base.next) {
		ir_tarval *const tv = get_enum_value(&entry->enum_value);
		if (tv == NULL)
			continue; /* value not yet determined */

		ir_tarval *const tvc = tarval_convert_to(tv, mode);
		if (tarval_cmp(tvc, adjusted_min) == ir_relation_less
		 || tarval_cmp(tvc, adjusted_max) == ir_relation_greater)
			return false;
	}
	return true;
}
/**
 * lower 64bit conversions
 *
 * Handles Conv nodes where one side is a 64bit integer:
 *  - float -> 64bit int: via the ia32 l_FloattoLL pseudo-op; the unsigned
 *    case needs extra control flow (see below) because FloattoLL produces a
 *    signed result.
 *  - 64bit int -> float: via the ia32 l_LLtoFloat pseudo-op.
 *  - everything else: falls back to the generic dword lowering.
 */
static void ia32_lower_conv64(ir_node *node, ir_mode *mode)
{
	dbg_info *dbg       = get_irn_dbg_info(node);
	ir_node  *op        = get_Conv_op(node);
	ir_mode  *mode_from = get_irn_mode(op);
	ir_mode  *mode_to   = get_irn_mode(node);

	if (mode_is_float(mode_from) && get_mode_size_bits(mode_to) == 64
	    && get_mode_arithmetic(mode_to) == irma_twos_complement) {
		/* We have a Conv float -> long long here */
		ir_node *float_to_ll;
		ir_node *l_res;
		ir_node *h_res;
		if (mode_is_signed(mode)) {
			/* convert from float to signed 64bit */
			ir_node *block = get_nodes_block(node);
			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, block, op);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
			                   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
			                   pn_ia32_l_FloattoLL_res_high);
		} else {
			/* Convert from float to unsigned 64bit.
			 * Strategy: values >= 2^63 are first reduced by 2^63 so they fit
			 * the signed FloattoLL, then 2^63 is added back to the integer
			 * result (0x80000000 in the high word). */
			ir_graph  *irg = get_irn_irg(node);
			/* 2^63 as an extended-precision float constant */
			ir_tarval *flt_tv
				= new_tarval_from_str("9223372036854775808", 19, x86_mode_E);
			ir_node   *flt_corr  = new_r_Const(irg, flt_tv);
			/* Split the block: upper_blk decides, lower_blk converts. */
			ir_node   *lower_blk = part_block_dw(node);
			ir_node   *upper_blk = get_nodes_block(node);
			set_dw_control_flow_changed();

			ir_node *opc  = new_rd_Conv(dbg, upper_blk, op, x86_mode_E);
			ir_node *cmp  = new_rd_Cmp(dbg, upper_blk, opc, flt_corr,
			                           ir_relation_less);
			ir_node *cond = new_rd_Cond(dbg, upper_blk, cmp);
			ir_node *in[] = {
				new_r_Proj(cond, mode_X, pn_Cond_true),
				new_r_Proj(cond, mode_X, pn_Cond_false)
			};
			/* extra block on the false (value >= 2^63) path */
			ir_node *blk = new_r_Block(irg, 1, &in[1]);
			in[1] = new_r_Jmp(blk);
			set_irn_in(lower_blk, 2, in);

			/* create two Phis: the integer correction and the (possibly
			 * reduced) float value */
			ir_node *phi_in[] = {
				new_r_Const_null(irg, mode),
				new_r_Const_long(irg, mode, 0x80000000)
			};
			ir_node *int_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(phi_in), phi_in, mode);
			ir_node *fphi_in[] = {
				opc,
				new_rd_Sub(dbg, upper_blk, opc, flt_corr, x86_mode_E)
			};
			ir_node *flt_phi
				= new_r_Phi(lower_blk, ARRAY_SIZE(fphi_in), fphi_in,
				            x86_mode_E);

			/* fix Phi links for next part_block() */
			if (is_Phi(int_phi))
				add_Block_phi(lower_blk, int_phi);
			if (is_Phi(flt_phi))
				add_Block_phi(lower_blk, flt_phi);

			float_to_ll = new_bd_ia32_l_FloattoLL(dbg, lower_blk, flt_phi);
			l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
			                   pn_ia32_l_FloattoLL_res_low);
			h_res = new_r_Proj(float_to_ll, mode,
			                   pn_ia32_l_FloattoLL_res_high);
			/* add the correction (2^63 or 0) back into the high word */
			h_res = new_rd_Add(dbg, lower_blk, h_res, int_phi, mode);

			/* move the call and its Proj's to the lower block */
			set_nodes_block(node, lower_blk);

			for (ir_node *proj = (ir_node*)get_irn_link(node); proj != NULL;
			     proj = (ir_node*)get_irn_link(proj)) {
				set_nodes_block(proj, lower_blk);
			}
		}
		ir_set_dw_lowered(node, l_res, h_res);
	} else if (get_mode_size_bits(mode_from) == 64
	           && get_mode_arithmetic(mode_from) == irma_twos_complement
	           && mode_is_float(mode_to)) {
		/* We have a Conv long long -> float here */
		ir_node *op_low  = get_lowered_low(op);
		ir_node *op_high = get_lowered_high(op);
		ir_node *block   = get_nodes_block(node);
		ir_node *ll_to_float
			= new_bd_ia32_l_LLtoFloat(dbg, block, op_high, op_low, mode_to);
		exchange(node, ll_to_float);
	} else {
		/* not a 64bit float<->int conversion: use the default lowering */
		ir_default_lower_dw_Conv(node, mode);
	}
}
/**
 * printf-like assembly emitter for amd64 nodes.
 *
 * Understood directives (after optional modifiers '#' = respect load/store
 * mode, '^' = ignore mode):
 *   %%        literal '%'
 *   %C        node's immediate value
 *   %D<n>     output register n of the node
 *   %E        ir_entity* vararg
 *   %L        jump target block of the node
 *   %O        node's frame-pointer offset, if any
 *   %R        arch_register_t* vararg
 *   %S<n>     input register n of the node
 *   %M        mode/insn-mode suffix of the node
 *   %d/%s/%u  int / C-string / unsigned vararg
 *   %c        sign/zero-extension mnemonic prefix for the node's mode
 * A '\n' in fmt finishes the current line and starts a new tab-indented one.
 *
 * BUGFIX: the digit check in %D used `'9' <= *fmt`, which rejected the valid
 * register index 9; it now matches the inclusive check used by %S.
 */
void amd64_emitf(ir_node const *const node, char const *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	be_emit_char('\t');
	for (;;) {
		/* Copy plain text up to the next directive, newline or terminator. */
		char const *start = fmt;
		while (*fmt != '%' && *fmt != '\n' && *fmt != '\0')
			++fmt;
		if (fmt != start) {
			be_emit_string_len(start, fmt - start);
		}

		if (*fmt == '\n') {
			be_emit_char('\n');
			be_emit_write_line();
			be_emit_char('\t');
			++fmt;
			continue;
		}

		if (*fmt == '\0')
			break;

		++fmt; /* skip '%' */

		/* Collect modifier flags preceding the conversion character. */
		amd64_emit_mod_t mod = EMIT_NONE;
		for (;;) {
			switch (*fmt) {
			case '#': mod |= EMIT_RESPECT_LS;  break;
			case '^': mod |= EMIT_IGNORE_MODE; break;
			default:  goto end_of_mods;
			}
			++fmt;
		}
end_of_mods:

		switch (*fmt++) {
			arch_register_t const *reg;

			case '%':
				be_emit_char('%');
				break;

			case 'C': {
				amd64_attr_t const *const attr = get_amd64_attr_const(node);
				/* FIXME: %d is a hack... we must emit 64bit constants, or sign
				 * extended 32bit constants... */
				be_emit_irprintf("$%d", attr->ext.imm_value);
				break;
			}

			case 'D':
				/* accept '0'..'9' inclusive, consistent with the 'S' case */
				if (*fmt < '0' || '9' < *fmt)
					goto unknown;
				reg = arch_get_irn_register_out(node, *fmt++ - '0');
				goto emit_R;

			case 'E': {
				ir_entity const *const ent = va_arg(ap, ir_entity const*);
				be_gas_emit_entity(ent);
				break;
			}

			case 'L': {
				ir_node *const block = get_cfop_target_block(node);
				be_gas_emit_block_name(block);
				break;
			}

			case 'O': {
				amd64_SymConst_attr_t const *const attr
					= get_amd64_SymConst_attr_const(node);
				if (attr->fp_offset)
					be_emit_irprintf("%d", attr->fp_offset);
				break;
			}

			case 'R':
				reg = va_arg(ap, arch_register_t const*);
emit_R:
				if (mod & EMIT_IGNORE_MODE) {
					emit_register(reg);
				} else {
					amd64_attr_t const *const attr
						= get_amd64_attr_const(node);
					if (mod & EMIT_RESPECT_LS) {
						emit_register_mode(reg, attr->ls_mode);
					} else {
						emit_register_insn_mode(reg, attr->data.insn_mode);
					}
				}
				break;

			case 'S': {
				int pos;
				if ('0' <= *fmt && *fmt <= '9') {
					pos = *fmt++ - '0';
				} else {
					goto unknown;
				}
				reg = arch_get_irn_register_in(node, pos);
				goto emit_R;
			}

			case 'M': {
				amd64_attr_t const *const attr = get_amd64_attr_const(node);
				if (mod & EMIT_RESPECT_LS) {
					amd64_emit_mode_suffix(attr->ls_mode);
				} else {
					amd64_emit_insn_mode_suffix(attr->data.insn_mode);
				}
				break;
			}

			case 'd': {
				int const num = va_arg(ap, int);
				be_emit_irprintf("%d", num);
				break;
			}

			case 's': {
				char const *const str = va_arg(ap, char const*);
				be_emit_string(str);
				break;
			}

			case 'u': {
				unsigned const num = va_arg(ap, unsigned);
				be_emit_irprintf("%u", num);
				break;
			}

			case 'c': {
				amd64_attr_t const *const attr = get_amd64_attr_const(node);
				ir_mode *mode = attr->ls_mode;
				/* 64bit values need no extension prefix */
				if (get_mode_size_bits(mode) == 64)
					break;
				/* unsigned 32bit moves in 32bit insn mode zero-extend
				 * implicitly */
				if (get_mode_size_bits(mode) == 32 && !mode_is_signed(mode)
				    && attr->data.insn_mode == INSN_MODE_32)
					break;
				be_emit_char(mode_is_signed(mode) ? 's' : 'z');
				amd64_emit_mode_suffix(mode);
				break;
			}

			default:
unknown:
				panic("unknown format conversion");
		}
	}

	be_emit_finish_line_gas(node);
	va_end(ap);
}
/** * @return The type of the function replacing the given node. */ static ir_type *get_softfloat_type(const ir_node *n) { ir_node *operand = get_irn_n(n, 0); ir_mode *operand_mode = get_irn_mode(operand); switch (get_irn_opcode(n)) { case iro_Div: operand_mode = get_irn_mode(get_Div_left(n)); /* fall through */ case iro_Add: case iro_Mul: case iro_Sub: if (operand_mode == mode_F) return binop_tp_f; else if (operand_mode == mode_D) return binop_tp_d; break; case iro_Cmp: if (operand_mode == mode_F) return cmp_tp_f; else if (operand_mode == mode_D) return cmp_tp_d; break; case iro_Conv: { ir_mode *const mode = get_irn_mode(n); if (operand_mode == mode_D) { if (mode == mode_F) return unop_tp_d_f; else if (get_mode_arithmetic(mode) == irma_twos_complement) { if (get_mode_size_bits(mode) <= 32) return mode_is_signed(mode) ? unop_tp_d_is : unop_tp_d_iu; else if (get_mode_size_bits(mode) == 64) return mode_is_signed(mode) ? unop_tp_d_ls : unop_tp_d_lu; } } else if (operand_mode == mode_F) { if (mode == mode_D) return unop_tp_f_d; else if (get_mode_arithmetic(mode) == irma_twos_complement) { if (get_mode_size_bits(mode) <= 32) return mode_is_signed(mode) ? unop_tp_f_is : unop_tp_f_iu; else if (get_mode_size_bits(mode) == 64) return mode_is_signed(mode) ? 
unop_tp_f_ls : unop_tp_f_lu; } } else if (get_mode_arithmetic(operand_mode) == irma_twos_complement) { if (mode_is_signed(operand_mode)) { if (get_mode_size_bits(operand_mode) <= 32) { if (mode == mode_D) return unop_tp_is_d; else if (mode == mode_F) return unop_tp_is_f; } else if (get_mode_size_bits(operand_mode) == 64) { if (mode == mode_D) return unop_tp_ls_d; else if (mode == mode_F) return unop_tp_ls_f; } } else { if (get_mode_size_bits(operand_mode) <= 32) { if (mode == mode_D) return unop_tp_iu_d; else if (mode == mode_F) return unop_tp_iu_f; } else if (get_mode_size_bits(operand_mode) == 64) { if (mode == mode_D) return unop_tp_lu_d; else if (mode == mode_F) return unop_tp_lu_f; } } } break; } case iro_Minus: if (operand_mode == mode_F) return unop_tp_f; else if (operand_mode == mode_D) return unop_tp_d; break; default: break; } panic("could not determine a suitable type"); }
/* Lower a 64bit Div/Mod by calling the (l|ul)divmod runtime routine and
 * rerouting the node's Projs to the resulting Call.
 * @param res_offset  index of the wanted result pair within the call's
 *                    result tuple (quotient vs. remainder — TODO confirm
 *                    against the runtime's return layout). */
static void lower_divmod(ir_node *node, ir_node *left, ir_node *right,
                         ir_node *mem, ir_mode *mode, int res_offset)
{
	dbg_info  *dbgi       = get_irn_dbg_info(node);
	ir_node   *block      = get_nodes_block(node);
	ir_node   *left_low   = get_lowered_low(left);
	ir_node   *left_high  = get_lowered_high(left);
	ir_node   *right_low  = get_lowered_low(right);
	ir_node   *right_high = get_lowered_high(right);
	ir_mode   *node_mode  = get_irn_mode(left);
	/* pick the signed or unsigned runtime helper */
	ir_entity *entity     = mode_is_signed(node_mode) ? ldivmod : uldivmod;
	ir_type   *mtp        = get_entity_type(entity);
	ir_graph  *irg        = get_irn_irg(node);
	ir_node   *addr       = new_r_Address(irg, entity);
	ir_node   *in[4];
	/* argument word order follows the target's endianness */
	if (arm_cg_config.big_endian) {
		in[0] = left_high;
		in[1] = left_low;
		in[2] = right_high;
		in[3] = right_low;
	} else {
		in[0] = left_low;
		in[1] = left_high;
		in[2] = right_low;
		in[3] = right_high;
	}
	ir_node *call    = new_rd_Call(dbgi, block, mem, addr, ARRAY_SIZE(in), in,
	                               mtp);
	ir_node *resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
	set_irn_pinned(call, get_irn_pinned(node));
	/* reroute every Proj of the original node to the new Call */
	foreach_out_edge_safe(node, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

		switch ((pn_Div)get_Proj_num(proj)) {
		case pn_Div_M:
			/* reroute to the call */
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_M);
			break;
		case pn_Div_X_regular:
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_X_regular);
			break;
		case pn_Div_X_except:
			set_Proj_pred(proj, call);
			set_Proj_num(proj, pn_Call_X_except);
			break;
		case pn_Div_res: {
			/* the result pair's word order again depends on endianness */
			ir_mode *low_mode = get_irn_mode(left_low);
			if (arm_cg_config.big_endian) {
				ir_node *res_low  = new_r_Proj(resproj, low_mode,
				                               res_offset+1);
				ir_node *res_high = new_r_Proj(resproj, mode, res_offset);
				ir_set_dw_lowered(proj, res_low, res_high);
			} else {
				ir_node *res_low  = new_r_Proj(resproj, low_mode, res_offset);
				ir_node *res_high = new_r_Proj(resproj, mode, res_offset+1);
				ir_set_dw_lowered(proj, res_low, res_high);
			}
			break;
		}
		}
		/* mark this proj: we have handled it already, otherwise we might fall
		 * into our new nodes. */
		mark_irn_visited(proj);
	}
int smaller_mode(const ir_mode *sm, const ir_mode *lm) { int sm_bits, lm_bits; assert(sm); assert(lm); if (sm == lm) return 1; sm_bits = get_mode_size_bits(sm); lm_bits = get_mode_size_bits(lm); switch (get_mode_sort(sm)) { case irms_int_number: switch (get_mode_sort(lm)) { case irms_int_number: if (get_mode_arithmetic(sm) != get_mode_arithmetic(lm)) return 0; /* only two complement implemented */ assert(get_mode_arithmetic(sm) == irma_twos_complement); /* integers are convertable if * - both have the same sign and lm is the larger one * - lm is the signed one and is at least two bits larger * (one for the sign, one for the highest bit of sm) * - sm & lm are two_complement and lm has greater or equal number of bits */ if (mode_is_signed(sm)) { if (!mode_is_signed(lm)) return 0; return sm_bits <= lm_bits; } else { if (mode_is_signed(lm)) { return sm_bits < lm_bits; } return sm_bits <= lm_bits; } case irms_float_number: /* int to float works if the float is large enough */ return 0; default: break; } break; case irms_float_number: if (get_mode_arithmetic(sm) == get_mode_arithmetic(lm)) { if ( (get_mode_sort(lm) == irms_float_number) && (get_mode_size_bits(lm) >= get_mode_size_bits(sm)) ) return 1; } break; case irms_reference: /* do exist machines out there with different pointer lengths ?*/ return 0; case irms_internal_boolean: return mode_is_int(lm); default: break; } /* else */ return 0; }
/** check, whether a mode allows a Mulh instruction. */
static int allow_Mulh(const ir_settings_arch_dep_t *params, ir_mode *mode)
{
	/* Mulh is only available up to a backend-specific bit width; within that
	 * limit the backend may support the signed and/or unsigned variant. */
	if (get_mode_size_bits(mode) <= params->max_bits_for_mulh) {
		return mode_is_signed(mode) ? params->allow_mulhs
		                            : params->allow_mulhu;
	}
	return 0;
}